Query: (randomized or probabilistic) and (algorithm or computation)
Options: case-insensitive, only full words match.
Information on the query syntax.
@TechReport{Lugowski??,
  author =      "Marek W. Lugowski",
  title =       "Computational Metabolism",
  institution = "Indiana University Computer Science Department",
  number =      "200",
  keywords =    "AI16 AI08 AI06 AI04 dynamical locally-coupled
                 bottom-up architecture",
  abstract =    "A new architecture for programming of dynamical
                 systems. It consists of a tessellation into processors
                 which undergo pairwise swaps. Processors come in
                 several types; each type recognizes certain other ones.
                 Recognition events result either in processor state
                 change or a 2-processor swap. Model combines cellular
                 automaton and connectionist features with
                 probabilistic computation. Intended application:
                 representation and computation of metaphors.",
}
@InProceedings{f-figrfps-79,
  author =    "R. V. Freivalds",
  title =     "Finite identification of general recursive functions
               by probabilistic strategies",
  booktitle = "Proceedings of the Conference on Algebraic,
               Arithmetic, and Categorial Methods in Computation
               Theory",
  publisher = "Akademie-Verlag",
  year =      "1979",
  pages =     "138--145",
}
@Article{g-mgi-79,
  author =  "B. R. Gaines",
  title =   "{Maryanski's} Grammatical Inferencer",
  journal = "IEEE Trans. Computers",
  volume =  "C-27",
  number =  "1",
  month =   jan,
  year =    "1979",
  pages =   "62--66",
  comment = "Corrects some typographical errors in the
             Maryanski-Booth algorithm for inferring a probabilistic
             finite-state grammar from a given set of strings.",
}
@Article{lrs-iatpfmp-83,
  author =  "S. E. Levinson and L. R. Rabiner and M. M. Sondhi",
  title =   "An Introduction to the Application of the Theory of
             Probabilistic Functions of a {Markov} Process to
             Automatic Speech Recognition",
  journal = "Bell System Technical Journal",
  volume =  "62",
  number =  "4",
  month =   apr,
  year =    "1983",
  pages =   "1035--1074",
  comment = "Analysis of implementation of Hidden Markov models and
             the Baum-Welch algorithm",
}
@InProceedings{cnlp:dasgupta,
  author =    "B. DasGupta",
  title =     "Learning capabilities of recurrent neural networks",
  booktitle = "Proceedings of the IEEE Southeastcon '92 Birmingham,
               AL, USA",
  year =      "1992",
  volume =    "2",
  pages =     "822--823",
  publisher = "IEEE, Piscataway, IEEE Service Center, New Jersey, USA
               (IEEE cat n 92CH3094-0)",
  ISSN =      "0734-7502",
  ISBN =      "0-7803-0494-2",
  address =   "Dept of Comput Sci, Pennsylvania State Univ,
               University Park, PA, USA,",
  abstract =  "The author relates the power of recurrent neural
               networks to those of other conventional models of
               computation like Turing machines and finite automata,
               and proves results about their learning capabilities.
               Specifically, it is shown that (a) probabilistic
               recurrent networks and probabilistic Turing machine
               models are equivalent; (b) probabilistic recurrent
               networks with bounded error probabilities are not more
               powerful than deterministic finite automata; (c)
               deterministic recurrent networks have the capability of
               learning P-complete language problems; and (d)
               restricting the weight-threshold relationship in
               deterministic recurrent networks may allow the network
               to learn only weaker classes of languages. 4 Refs",
  keywords =  "Neural networks Learning systems Turing machines
               Finite automata Probability Computer programming
               languages Recurrent neural networks Probabilistic
               neural networks Deterministic recurrent networks",
}
@Article{Montana??,
  author =   "David Montana",
  title =    "A Weighted Probabilistic Neural Network",
  journal =  "NIPS4",
  keywords = "genetic algorithms, connectionism, cogann ref",
  abstract = "The Probabilistic Neural Network (PNN)
              algorithm represents the likelihood function of a given
              class as the sum of identical, isotropic Gaussians. In
              practice, PNN is often an excellent pattern classifier,
              outperforming other classifiers including
              backpropagation. However, it is not robust with respect
              to affine transformations of feature space, and this
              can lead to poor performance on certain data. We have
              derived an extension of PNN called Weighted PNN (WPNN)
              which compensates for this flaw by allowing anisotropic
              Gaussians, i.e. Gaussians whose covariance is not a
              multiple of the identity matrix. The covariance is
              optimized using a genetic algorithm, some interesting
              features of which are its redundant, logarithmic
              encoding and large population size. Experimental
              results validate our claims.",
}
@InProceedings{KESS:PAU:RAU:1991,
  author =    "Christoph W. Kessler and W. J. Paul and T. Rauber",
  title =     "Register Allocation and Code Optimization for
               Vector Basic Blocks on Vector Processors",
  booktitle = "Code Generation --- Concepts, Tools, Techniques:
               Proceedings of the International Workshop on Code
               Generation, Dagstuhl, Germany, 20--24 May 1991",
  editor =    "Robert Giegerich and Susan L. Graham",
  series =    "Workshops in Computing",
  year =      "1991",
  pages =     "73--91",
  publisher = "Springer-Verlag",
  ISBN =      "3-540-19757-5",
  note =      "ISBN 3-540-19757-5 and 3-387-19757-5",
  abstract =  "We present a randomized heuristic algorithm to
               generate continuous evaluations for expression DAGs
               with nearly minimal register need. The heuristic may be
               used to reorder the statements in a basic block before
               applying a global register allocation scheme like graph
               coloring. Experiments have shown that the new heuristic
               produces results about 30\% better on the average than
               without reordering. Basic blocks of vector instructions
               lead to vector DAGs. For the special class of
               quasiscalar DAGS, the problem can be reduced to the
               scalar case handled above provided that some machine
               constraints such as buffer size and pipeline depth are
               taken into consideration. Theoretical considerations
               show that there exists an interesting tradeoff-effect
               between strip mining and vector register spilling.
               Therefore we give an algorithm which computes the best
               ratio of spill rate to strip length with respect to the
               run time on the target vector processor which is given
               by some architecture parameters. This algorithm is
               suited for vector processors containing a buffer
               (register file) which may be partitioned arbitrarily by
               the user.",
}
@Article{LynchEtAl82,
  author =  "N. A. Lynch and N. Griffeth and M. J. Fisher and L. J.
             Guibas",
  title =   "Probabilistic Analysis of a Network Resource
             Allocation Algorithm",
  journal = "Draft from Georgia Inst. of Technology",
  year =    "1982",
}
@Article{Gordon88,
  author =  "Michael Gordon",
  title =   "Probabilistic and Genetic Algorithms for Document
             Retrieval",
  journal = "Communications of the ACM",
  volume =  "31",
  number =  "10",
  pages =   "1208--1218",
  year =    "1988",
  month =   oct,
  annote =  "Genetic algorithm repeats a two-step process: Measure
             the performance of competing documents descriptions;
             Replace the set of descriptions, until some criterion
             is attained.",
}
@techreport{IOANNIDIS90,
  key         = {Ioannidis \& Kang},
  author      = {Y. E. Ioannidis and Y. Kang},
  title       = {Randomized Algorithms for Optimizing Large Join Queries},
  institution = {University of Wisconsin},
  address     = {Madison, Wisconsin},
  year        = {1990},
  abstract    = {Query optimization for relational database systems is
                 a combinatorial optimization problem, which makes
                 exhaustive search unacceptable as the query size grows.
                 Randomized algorithms, such as Simulated Annealing and
                 Iterative Improvement, are viable alternatives to
                 exhaustive search. We have adapted these algorithms to
                 the optimization of project-select-join queries. We
                 have tested them on large queries of various types with
                 different databases. The study of the shape of the cost
                 function over the solution space associated with such
                 queries and the analysis of the behavior of these
                 algorithms have inspired a new Hybrid algorithm, which
                 is a combination of Simulated Annealing and Iterative
                 Improvement. Experimental results show that Hybrid
                 outperforms the original algorithms in terms of both
                 output quality and running time.},
  bibdate     = {Thu May 3 15:46:53 1990},
  owner       = {robyn},
}
@TechReport{TSANGARIS91,
  key =         "Tsangaris \& Naughton",
  author =      "M. Tsangaris and J. Naughton",
  title =       "A Stochastic Approach for Clustering in Object Stores",
  number =      "1017",
  institution = "University of Wisconsin",
  year =        "1991",
  month =       mar,
  abstract =    "Object clustering has long been recognized as
                 important to the performance of object bases, but in
                 most work to date, it is not clear exactly what is
                 being optimized or how optimal are the solutions
                 obtained. We give a rigorous treatment of a fundamental
                 problem in clustering: given an object base and a
                 probabilistic description of the expected access
                 patterns, what is an optimal object clustering, and how
                 can this optimal clustering be found or approximated?
                 We present a system model for the clustering problem
                 and discuss two models for access patterns in the
                 system. For the first, exact optimal clustering
                 strategies can be found; for the second, we show that
                 the problem is NP-complete, but that it is an instance
                 of a well-studied graph partitioning problem. We
                 propose a new clustering algorithm based upon
                 Kernighan's heuristic graph partitioning algorithm, and
                 present an experimental comparison of this new
                 clustering algorithm with several previously proposed
                 clustering algorithms.",
  bibdate =     "Tue Sep 17 13:51:26 1991",
  owner =       "soo",
}
@TechReport{Barak84,
  author =      "A. Barak and Z. Drezner",
  title =       "A Probabilistic Algorithm for Scattering Information
                 in a Multicomputer System",
  institution = "Univ. of Michigan",
  type =        "Rep.",
  number =      "CRL-TR-15-84",
  address =     "Ann Arbor, MI",
  month =       mar,
  year =        "1984",
}
@Article{Shier1988,
  author =      "D. R. Shier",
  title =       "A New Algorithm for Performance Analysis of
                 Communication Systems",
  journal =     "IEEE trans. on commun.",
  volume =      "COM-36",
  number =      "4",
  year =        "1988",
  pages =       "516--527",
  references =  "6",
  country =     "USA",
  language =    "English",
  enum =        "3682",
  descriptors = "performance evaluation; communication system;
                 analysis;",
  date =        "15/01/90",
  by_date =     "DS",
  revision =    "21/04/91",
  by_rev =      "Le",
  location =    "FHL; UniDo-ESV",
  annote =      "This paper presents a new algorithm for generating in
                 order the most likely states of a probabilistic system,
                 thus enabling a more rapid procedure for analyzing the
                 performance of communication networks with
                 stochastically failing components. The new algorithm
                 improves upon the algorithm recently reported in [1],
                 both in terms of storage requirements and execution
                 efficiency.",
}
@Article{Mitra1991,
  author =      "D. Mitra and J. Seery",
  title =       "Comparative Evaluations of Randomized and Dynamic
                 Routing Strategies for Circuit-Switched Networks",
  journal =     "IEEE trans. on commun.",
  volume =      "COM-39",
  number =      "1",
  year =        "1991",
  pages =       "102--116",
  references =  "0",
  country =     "USA",
  language =    "English",
  enum =        "4834",
  descriptors = "Alternate routing; dynamic routing; circuit switching;
                 simulation model;",
  date =        "05/12/91",
  by_date =     "Uhl",
  revision =    "07/04/92",
  by_rev =      "Le",
  location =    "UniDo-ESV",
  annote =      "The theory and practice of circuit switching on
                 networks has recently been rapidly evolving. We
                 investigate two fundamentally separate classes of
                 routing algorithms---randomized and deterministic. The
                 main randomized algorithm is Gibbens' and Kelly's
                 recently introduced Dynamic Alternate Routing. In the
                 contrasting deterministic algorithm, attempts to carry
                 a call are made in a specific precomputed order.",
}
@TechReport{Seidel1990,
  author =      "R. Seidel",
  title =       "A Simple and Fast Incremental Randomized Algorithm for
                 Computing Trapezoidal Decompositions and for
                 Triangulating Polygons",
  institution = "Institute for Computer Science, Department of
                 Mathematics, Freie Universit{\"a}t Berlin",
  type =        "Report",
  number =      "B-90-07",
  year =        "1990",
  pages =       "1--16",
  references =  "13",
  address =     "Berlin",
  country =     "D",
  language =    "English",
  enum =        "8487",
  descriptors = "Algorithm;",
  date =        "31/01/94",
  by_date =     "IDR",
  revision =    "18/02/94",
  by_rev =      "Le",
  location =    "PKI-OEG: Lit-Fach Idr",
  annote =      "This paper presents a very simple incremental
                 randomized algorithm for computing the trapezoidal
                 decomposition induced by a set S of n line segments in
                 the plane. If S is given as a simple polygon, the
                 expected running time of the algorithm is O(n log*
                 n). This leads to a simple algorithm of the same
                 complexity for triangulating polygons.",
}
@Article{Coffman1984,
  author =      "E. G. Coffman",
  title =       "Recent progress in the performance evaluation of
                 fundamental allocation algorithms",
  journal =     "ACM perf. eval. rev. (SIGMETRICS)",
  volume =      "12",
  number =      "3",
  year =        "1984",
  pages =       "2--6",
  references =  "10",
  country =     "USA",
  language =    "English",
  enum =        "610",
  descriptors = "Performance evaluation; information system; operating
                 system; memory management; allocation; algorithm;",
  date =        "16/11/84",
  by_date =     "S{\"u}",
  revision =    "21/04/91",
  by_rev =      "Le",
  location =    "RWTH-AC-DFV: Bibl.",
  annote =      "OUR UNDERSTANDING OF SEVERAL ALLOCATION ALGORITHMS
                 BASIC TO OPERATING SYSTEMS AND TO DATA BASE SYSTEMS HAS
                 IMPROVED SUBSTANTIALLY AS A RESULTS OF A NUMBER OF
                 RESEARCH EFFORTS WITHIN THE PAST ONE OR TWO
                 YEARS......... WE REFER TO PROOFS THAT CERTAIN
                 CLASSICAL ALGORITHMS DESCRIBED AS APPROXIMATE ARE IN
                 FACT OPTIMAL IN A STRONG PROBABILISTIC SENSE. THE WORK
                 DISCUSSED HERE WILL BE CLASSIFIED ACCORDING TO THE
                 APPLICATION AREAS, ARCHIVAL AND DYNAMIC",
}
@TechReport{Lam1981,
  author =      "S. S. Lam and Y. L. Liem",
  title =       "An analysis of the tree convolution algorithm for
                 queueing networks",
  institution = "University of Texas",
  number =      "78712",
  year =        "1981",
  references =  "0",
  address =     "Austin, TX",
  country =     "USA",
  language =    "English",
  enum =        "1863",
  descriptors = "Queueing system; queueing network; evaluation; method;
                 sparse matrix; iterative method; stationary process;
                 binomial distribution; exponential distribution;
                 exponential queueing network; network throughput;
                 network delay; routing algorithm;",
  date =        "27/05/81",
  by_date =     "Le",
  revision =    "21/04/91",
  by_rev =      "Le",
  location =    "TUDD-IfN-TK: Li-Ord.Le; RWTH-AC-DFV: TELL",
  annote =      "AN ANALYSIS OF THE TREE CONVOLUTION ALGORITHM FOR THE
                 SOLUTION OF PRODUCT-FORM QUEUEING NETWORKS IS
                 PRESENTED. A PROBABILISTIC MODEL IS USED TO GENERATE
                 NETWORKS WITH M SERVICE CENTERS, K ROUTING CHAINS AND N
                 CUSTOMERS IN EACH CHAIN. THE ROUTES OF CHAINS ARE
                 SAMPLED FROM INDEPENDENT BERNOULLI TRIALS. THE EXPECTED
                 TIME AND SPACE REQUIREMENTS FOR THE COMPUTATION OF
                 NETWORK NORMALIZATION CONSTANTS AND THE PERFORMANCE
                 MEASURES OF CHAIN THROUGHPUTS,",
}
@Article{Badr1989,
  author =      "H. G. Badr and S. Podar",
  title =       "An optimal shortest-path routing policy for network
                 computers with regular mesh-connected topologies",
  journal =     "IEEE trans. on comp.",
  volume =      "C-38",
  number =      "10",
  year =        "1989",
  pages =       "1362--1371",
  references =  "22",
  country =     "USA",
  language =    "English",
  enum =        "162",
  descriptors = "Routing algorithm; shortest path;",
  date =        "23/11/89",
  by_date =     "Le",
  revision =    "21/04/91",
  by_rev =      "Le",
  location =    "PKI-OEG: Jt",
  annote =      "We present a new probabilistic routing policy, {"}Z
                 Routing Policy{"}, within the class of nonadaptive,
                 shortest-path routing policies for regular
                 mesh-connected topologies such as n-dimensional toroids
                 and hypercubes. The focus of our attention is routing
                 in networks of computers in a distributed computing
                 environment, the so-called {"}network
                 computers{"}....",
}
@Article{Kruskal1969,
  author =      "J. B. Kruskal",
  title =       "Work-Scheduling Algorithms: {A} Nonprobabilistic
                 Queuing Study (with Possible Application to No. 1
                 {ESS})",
  journal =     "BSTJ",
  year =        "1969",
  pages =       "2963--2974",
  references =  "3",
  country =     "USA",
  language =    "English",
  enum =        "3591",
  descriptors = "Priority; model; algorithm; evaluation; approximation;
                 method;",
  date =        "19/02/90",
  by_date =     "Ha",
  revision =    "21/04/91",
  by_rev =      "Le",
  location =    "TUDD-IfN-TK: Li-Ord.Le",
  annote =      "This paper provides an approximate method for
                 evaluating different cycles. Using the evaluation
                 method and some approximations, we obtain a formula for
                 the optimum relative frequency with which different
                 queues should be visited. The model used is
                 non-probabilistic, and treats requests as continuous
                 rather than discrete. The model also ignores certain
                 interdependencies between queues.",
}
@InProceedings{Dimitrijevic??,
  author =    "Dragomir D. Dimitrijevic and Mon-Song Chen",
  title =     "Dynamic State Exploration in Quantitative Protocol
               Analysis",
  booktitle = "Proc. 9th Intl. Symp. on Protocol Specification,
               Testing and Verification (IFIP WG 6.1)",
  address =   "Enschede, The Netherlands",
  month =     jun,
  year =      "1989",
  note =      "June 6--9",
  abstract =  "This paper extends the dynamic state
               exploration technique used in an integrated
               probabilistic protocol verification and evaluation
               procedure [7]. The extension reduces the complexity
               from $O(n^{2m/s+2})$ to $O(n^{3})$, where n and m are
               the numbers of generated states and explored
               transitions, respectively. Additional properties of the
               technique are also analyzed to further enhance the
               verification and evaluation procedure. The procedure
               based on this technique is unique in that (1) it
               evaluates the importance of states in the course of
               global reachability graph (GRG) generation, (2) it,
               based on the importance of states, explores only the
               most interesting subset of states, and (3) it computes
               important reliability and performance measures such as
               Mean Time To Unknown and Confidence Level, which is the
               probability sum of checked scenarios, and uses them as
               the stopping criteria. The procedure is demonstrated
               using the call establishment phase of X.75 protocol.
               [7] Dimitrijevic, D. D. and M-S. Chen, ``An
               Integrated Algorithm for Probabilistic Protocol
               Verification and Evaluation,'' Proc. INFOCOM'89",
}
@article{Hofri84,
  author      = {M. Hofri},
  title       = {A Probabilistic Analysis of the Next-Fit Bin Packing
                 Algorithm},
  journal     = {J. Algorithms},
  volume      = {5},
  year        = {1984},
  pages       = {547--556},
  institution = {Technion},
  keywords    = {IMAGE PART PATTERN},
}
@article{Dillencourt:1992:RAS,
  author   = {M. B. Dillencourt and D. M. Mount and N. S. Netanyahu},
  title    = {A randomized algorithm for slope selection},
  journal  = {Internat. J. Comput. Geom. Appl.},
  volume   = {2},
  year     = {1992},
  pages    = {1--27},
  keywords = {slope selection, randomized algorithm, Theil Sen
              estimator, line fitting, robust estimation},
}
@InProceedings{rn:KargerStein:93,
  author =    "D. R. Karger and C. Stein",
  title =     "An {$O(n^2)$} Algorithm for Minimum Cuts",
  booktitle = "25th Annual ACM STOC",
  year =      "1993",
  pages =     "757--765",
  language =  "english",
  keywords =  "minimum cut, randomized algorithms",
}
@InCollection{int:Alizadeh1,
  author =    "F. Alizadeh",
  title =     "A sublinear-time randomized parallel algorithm for
               the maximum clique problem in perfect graphs",
  booktitle = "Proceedings of the Second ACM--SIAM Symposium on
               Discrete Algorithms",
  year =      "1991",
  month =     jan,
  pages =     "188--194",
  address =   "San Francisco, CA, USA",
}
@TechReport{int:Anstreicher27,
  author =      "K. M. Anstreicher and J. Ji and F. A. Potra and Y.
                 Ye",
  title =       "Probabilistic analysis of an infeasible primal-dual
                 algorithm for linear programming",
  type =        "{Reports on Computational Mathematics}",
  number =      "27",
  year =        "1992",
  month =       jul,
  institution = "Dept.\ of Mathematics, University of Iowa",
  address =     "Iowa City, IA~52242, USA",
}
@article{Bentley:1980:GSL,
  author          = {Jon Louis Bentley and James B. Saxe},
  title           = {Generating Sorted Lists of Random Numbers},
  journal         = {ACM Transactions on Mathematical Software},
  volume          = {6},
  number          = {3},
  pages           = {359--364},
  month           = sep,
  year            = {1980},
  acknowledgement = ack-nhfb,
  bibdate         = {Mon Aug 29 10:57:24 1994},
  keywords        = {random number generation, sorting, probabilistic
                     methods in algorithm design, linear-time algorithm},
}
@article{Corana:1987:MMF,
  author          = {A. Corana and M. Marchesi and C. Martini and S.
                     Ridella},
  title           = {Minimizing Multimodal Functions of Continuous
                     Variables with the ``Simulated Annealing'' Algorithm},
  journal         = {ACM Transactions on Mathematical Software},
  volume          = {13},
  number          = {3},
  pages           = {262--280},
  month           = sep,
  year            = {1987},
  ISSN            = {0098-3500},
  note            = {See also \cite{Corana:1989:CMF}.},
  acknowledgement = ack-nhfb,
  bibdate         = {Sat Nov 19 13:08:34 1994},
  keywords        = {algorithms; performance},
  review          = {ACM CR 8804-0282},
  subject         = {G.1.6 Mathematics of Computing, NUMERICAL ANALYSIS,
                     Optimization, Nonlinear programming \\ G.4 Mathematics
                     of Computing, MATHEMATICAL SOFTWARE, Certification and
                     testing \\ G.3 Mathematics of Computing, PROBABILITY
                     AND STATISTICS, Probabilistic algorithms (including
                     Monte Carlo) \\ F.2.2 Theory of Computation, ANALYSIS
                     OF ALGORITHMS AND PROBLEM COMPLEXITY, Nonnumerical
                     Algorithms and Problems, Sorting and searching},
}
@article{Li:1994:RSA,
  author          = {Kim-Hung Li},
  title           = {Reservoir Sampling Algorithms of Time Complexity
                     {$O(n(1+\log(N/n)))$}},
  journal         = {ACM Transactions on Mathematical Software},
  volume          = {20},
  number          = {4},
  month           = dec,
  year            = {1994},
  pages           = {481--493},
  keywords        = {analysis of algorithms; random sampling; reservoir},
  subject         = {G.3 [Mathematics of Computing]: Probability and
                     Statistics -- probabilistic algorithms; random number
                     generation; statistical software; G.4 [Mathematics of
                     Computing]: Mathematical Software -- algorithm
                     analysis},
  acknowledgement = ack-rfb,
  bibdate         = {Tue Mar 14 16:16:57 1995},
}
@InProceedings{Kaminski1981,
  author =       "M. Kaminski",
  title =        "Note on probabilistic algorithms in integer and
                  polynomial arithmetic",
  pages =        "117",
  editor =       "P. Wang",
  year =         "1981",
  booktitle =    "SYMSAC '81: Proceedings of the 1981 ACM Symposium on
                  Symbolic and Algebraic Computation",
  organization = "Association for Computing Machinery",
}
@InProceedings{bus:KuriBaeckHeitkoetter:94,
  author =    "Sami Khuri and Thomas B{\"a}ck and J{\"o}rg
               Heitk{\"o}tter",
  title =     "An Evolutionary Approach to Combinatorial Optimization
               Problems",
  booktitle = "Proceedings of the 1994 Computer Science Conference
               (CSC'94)",
  year =      "1994",
  month =     mar,
  address =   "Phoenix, AZ, USA",
  note =      "To appear",
  publisher = "ACM Press",
  abstract =  "The paper reports on the application of genetic
               algorithms, probabilistic search algorithms based on
               the model of organic evolution, to NP-complete
               combinatorial optimization problems. In particular, the
               subset sum, maximum cut, and minimum tardy task
               problems are considered. Except for the fitness
               function, no problem-specific changes of the genetic
               algorithm are required in order to achieve results of
               high quality even for the problem instances of size 100
               used in the paper. For constrained problems, such as
               the subset sum and the minimum tardy task, the
               constraints are taken into account by incorporating a
               graded penalty term into the fitness function. Even for
               large instances of these highly multimodal optimization
               problems, an iterated application of the genetic
               algorithm is observed to find the global optimum within
               a number of runs. As the genetic algorithm samples only
               a tiny fraction of the search space, these results are
               quite encouraging.",
}
@article{Frenkel:1986:CPP,
  author          = {Karen A. Frenkel},
  title           = {Complexity and parallel processing: an interview with
                     {Richard Karp}},
  journal         = {Communications of the ACM},
  volume          = {29},
  number          = {2},
  pages           = {112--117},
  month           = feb,
  year            = {1986},
  ISSN            = {0001-0782},
  acknowledgement = ack-nhfb,
  bibdate         = {Sun Aug 14 18:32:13 MDT 1994},
  keywords        = {performance; theory; human factors},
  subject         = {K.2 Computing Milieux, HISTORY OF COMPUTING \\ G.2.1
                     Mathematics of Computing, DISCRETE MATHEMATICS,
                     Combinatorics \\ F.0 Theory of Computation, GENERAL \\
                     F.1.1 Theory of Computation, COMPUTATION BY ABSTRACT
                     DEVICES, Models of Computation, Computability theory \\
                     F.1.2 Theory of Computation, COMPUTATION BY ABSTRACT
                     DEVICES, Modes of Computation, Parallelism \\ F.2.0
                     Theory of Computation, ANALYSIS OF ALGORITHMS AND
                     PROBLEM COMPLEXITY, General \\ G.2.2 Mathematics of
                     Computing, DISCRETE MATHEMATICS, Graph Theory \\ F.1.2
                     Theory of Computation, COMPUTATION BY ABSTRACT DEVICES,
                     Modes of Computation, Probabilistic computation \\
                     F.1.3 Theory of Computation, COMPUTATION BY ABSTRACT
                     DEVICES, Complexity Classes, Reducibility and
                     completeness \\ F.1.3 Theory of Computation,
                     COMPUTATION BY ABSTRACT DEVICES, Complexity Classes,
                     Relations among complexity classes},
}
@article{Karp:1986:CCR,
  author          = {Richard M. Karp},
  title           = {Combinatorics, complexity, and randomness},
  journal         = {Communications of the ACM},
  volume          = {29},
  number          = {2},
  pages           = {98--109},
  month           = feb,
  year            = {1986},
  ISSN            = {0001-0782},
  note            = {This is the 1985 Turing Award Lecture. It traces the
                     development of combinatorial optimization and
                     computational complexity theory. It discusses
                     probabilistic algorithms and probabilistic analysis of
                     approximation algorithms for {\em NP}-complete
                     optimization problems.},
  acknowledgement = ack-nhfb,
  bibdate         = {Sun Aug 14 18:32:13 MDT 1994},
  bibsource       = {ftp://ftp.ira.uka.de/pub/bibliography/Theory/ProbAlgs.bib},
  keywords        = {algorithms; performance; theory},
  review          = {ACM CR 8610-0921},
  subject         = {F.1.0 Theory of Computation, COMPUTATION BY ABSTRACT
                     DEVICES, General \\ G.2.1 Mathematics of Computing,
                     DISCRETE MATHEMATICS, Combinatorics \\ K.2 Computing
                     Milieux, HISTORY OF COMPUTING, People \\ F.0 Theory of
                     Computation, GENERAL \\ F.2.0 Theory of Computation,
                     ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, General
                     \\ G.2.2 Mathematics of Computing, DISCRETE
                     MATHEMATICS, Graph Theory \\ F.1.3 Theory of
                     Computation, COMPUTATION BY ABSTRACT DEVICES,
                     Complexity Classes, Reducibility and completeness \\
                     F.1.3 Theory of Computation, COMPUTATION BY ABSTRACT
                     DEVICES, Complexity Classes, Relations among complexity
                     classes \\ K.2 Computing Milieux, HISTORY OF
                     COMPUTING},
}
@article{Hester:1987:SOS,
  author          = {J. H. Hester and D. H. Hirschberg},
  title           = {Self-organizing search lists using probabilistic
                     back-pointers},
  journal         = {Communications of the ACM},
  volume          = {30},
  number          = {12},
  pages           = {1074--1079},
  month           = dec,
  year            = {1987},
  ISSN            = {0001-0782},
  acknowledgement = ack-nhfb,
  bibdate         = {Sun Aug 14 18:32:13 MDT 1994},
  keywords        = {algorithms},
  review          = {ACM CR 8810-0786},
  subject         = {E.1 Data, DATA STRUCTURES, Lists \\ F.2.2 Theory of
                     Computation, ANALYSIS OF ALGORITHMS AND PROBLEM
                     COMPLEXITY, Nonnumerical Algorithms and Problems,
                     Sorting and searching \\ G.3 Mathematics of Computing,
                     PROBABILITY AND STATISTICS, Probabilistic algorithms
                     (including Monte Carlo)},
}
@Article{Gordon:1988:PGA,
  author =          "M. Gordon",
  title =           "Probabilistic and Genetic Algorithms in Document
                     Retrieval",
  journal =         "Communications of the ACM",
  volume =          "31",
  number =          "10",
  pages =           "1208--1218",
  month =           oct,
  year =            "1988",
  ISSN =            "0001-0782",
  acknowledgement = ack-nhfb,
  bibdate =         "Sat Sep 24 11:12:23 1994",
  keywords =        "algorithms; design; measurement; performance",
  subject =         "F.2.2 Theory of Computation, ANALYSIS OF ALGORITHMS
                     AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and
                     Problems, Sorting and searching \\ H.3.3 Information
                     Systems, INFORMATION STORAGE AND RETRIEVAL, Information
                     Search and Retrieval, Query formulation \\ H.3.3
                     Information Systems, INFORMATION STORAGE AND RETRIEVAL,
                     Information Search and Retrieval, Search process",
}
@article{Fox:1992:MPH,
  author          = {Edward A. Fox and Lenwood S. Heath and Qi Fan Chen and
                     Amjad M. Daoud},
  title           = {Minimal Perfect Hash Functions for Large Databases},
  journal         = {Communications of the ACM},
  volume          = {35},
  number          = {1},
  pages           = {105--121},
  month           = jan,
  year            = {1992},
  note            = {This is the first published algorithm for computing
                     minimal perfect hash functions for lists of millions of
                     words; previous algorithms were computationally
                     infeasible for more than a few hundred words.},
  acknowledgement = ack-nhfb,
  bibdate         = {Thu May 20 17:19:08 1993},
  bibsource       = {ftp://ftp.ira.uka.de/pub/bibliography/Theory/ProbAlgs.bib},
  note2           = {This paper presents two randomized algorithm for
                     minimal perfect hashing functions that are designed for
                     use with data bases with as many as a million keys. The
                     algorithms have been experimentally evaluated. The
                     first algorithm generates hash functions that are less
                     than $O(n)$ computer words long, and the second
                     generates functions that approach the theoretical lower
                     bound of $\Omega(n/\log{n})$ words. This work is a
                     predecessor of \cite{Fox:1991:SEDcb}.},
}
@inproceedings{Fuhr88,
  author    = {Norbert Fuhr and Hubert Huther},
  title     = {Optimum Probability Estimation Based on Expectations},
  booktitle = {Proceedings of the Eleventh Annual International ACM
               SIGIR Conference on Research and Development in
               Information Retrieval},
  series    = {Quantitative Models (2)},
  pages     = {257--273},
  year      = {1988},
  copyright = {(c) Copyright 1988 Association for Computing
               Machinery},
  abstract  = {Probability estimation is important for the
               application of probabilistic models as well as for any
               evaluation in IR. We discuss the interdependencies
               between parameter estimation and other properties of
               probabilistic models. Then we define an optimum
               estimate which can be applied to various typical
               estimation problems in IR. A method for the computation
               of this estimate is described which uses expectations
               from empirical distributions. Some experiments show the
               applicability of our method, whereas comparable
               approaches are partially based on false assumptions or
               yield estimates with systematic errors.},
}
@Article{Johnson93,
  author =    "C. W. Johnson",
  title =     "A Probabilistic Logic for the Development of
               Safety-Critical, Interactive Systems",
  journal =   "International Journal of Man-Machine Studies",
  volume =    "39",
  number =    "2",
  pages =     "333--351",
  year =      "1993",
  copyright = "(c) Copyright 1993 Academic Press",
  abstract =  "This paper starts from the premise that the human
               contribution to risk must be assessed during the
               development of safety-critical systems. In contrast to
               previous approaches, discrete numerical values are
               rejected as means of quantifying the probability of
               operator {"}error{"} for many different users of many
               different systems. Numerical probabilities are used to
               rank the importance that designers attach to particular
               system failures. Adequate development resources must be
               allocated so that operators will resolve and not
               exacerbate high-priority failures. In order to do this,
               human factors and systems engineers must be provided
               with notations that can represent risk assessments.
               Many techniques that are in widespread use, such as
               fault-tree analysis, provide inadequate support for the
               development of interactive systems. They do not capture
               the temporal properties that can determine the quality
               of interaction between operators and stochastic
               application processes. It is argued that probabilistic
               temporal logics avoid this limitation. Notations which
               are built around linear models of time cannot easily
               capture the semantics of risk assessments. We have
               developed Probabilistic Computation Tree Logic (PCTL)
               to avoid this problem. PCTL is built around a branching
               model of time. Finally, it is argued that PCTL
               specifications and Monte Carlo techniques can be used
               to provide faithful simulations of stochastic
               interactive systems. The implementation of the Risklog
               prototyping tool is briefly described. Partial
               simulations can be shown to system operators in order
               to determine whether they are likely to intervene and
               resolve system failures.",
}
@article{Hadjimichael93,
  author    = {Michael Hadjimichael and Anita Wasilewska},
  title     = {Interactive Inductive Learning},
  journal   = {International Journal of Man-Machine Studies},
  volume    = {38},
  number    = {2},
  pages     = {147--167},
  year      = {1993},
  copyright = {(c) Copyright 1993 Academic Press},
  abstract  = {We propose an interactive probabilistic inductive
               learning model which defines a feedback relationship
               between the user and the learning program. We extend
               previously described learning algorithms to a
               conditional model previously described by the authors,
               and formulate our Conditional Probabilistic Learning
               Algorithm (CPLA), applying conditions as introduced by
               Wasilewska to a probabilistic version of the work of
               Wong and Wong. We propose the Condition Suggestion
               Algorithm (CSA) as a way to use the syntactic knowledge
               in the system to generalize the family of decision
               rules. We also examine the semantic knowledge of the
               system implied by the suggested conditions and analyse
               the effects of conditions on the system. CPLA/CSA has
               been implemented by the first author and was used to
               generate the examples presented.},
}
@article{Tokuda93,
  author    = {Tokuda, N. and Fukuda, A.},
  title     = {A Probabilistic Inference Scheme for Hierarchical
               {Buggy} Models},
  journal   = {International Journal of Man-Machine Studies},
  volume    = {38},
  number    = {5},
  pages     = {857--872},
  year      = {1993},
  copyright = {Copyright 1993 Academic Press},
  abstract  = {The probabilistic reasoning scheme of Dempster-Shafer
               theory provides a remarkably efficient bug
               identification algorithm for a hierarchical Buggy
               model. In the particular Buggy model generated by the
               repair theory of Brown \& Van Lehn (1980, A generative
               theory of bugs in procedural skills, Cognitive Science,
               2, 155--192), both Shafer \& Logan (1987, Implementing
               Dempster's rule for hierarchical evidence, Artificial
               Intelligence, 33, 271--298) and Gordon \& Shortliffe
               (1985, A method for managing evidential reasoning in a
               hierarchical hypothesis space, Artificial Intelligence,
               26, 324--357) schemes have provided almost identical
               computational accuracy although the latter involves an
               approximation of a ``smallest superset''. If $n$
               denotes the number of bugs to be identified, the
               computational complexity of the two schemes, originally
               of {$O(n^{4/3})$} and {$O(n^2)$} respectively, can be
               improved to {$O(n)$} using the simplified top-down
               calculation scheme whereby from among all the nodes we
               first locate the particular ``parental'' node to
               which the bug belongs and then the bug itself among the
               sibs within the node. On average, about 5--7 problems
               are adequate to raise the belief function of the bug to
               95\% level, based on the evidential reasoning
               schemes.},
}
@inproceedings{Desmarais93,
  author    = {Desmarais, Michel C. and Liu, Jiming},
  title     = {Exploring the Applications of User-Expertise
               Assessment for Intelligent Interfaces},
  booktitle = {Proceedings of {ACM} {INTERCHI'93} Conference on Human
               Factors in Computing Systems},
  series    = {Collecting User-Information for System Design},
  pages     = {308--313},
  year      = {1993},
  copyright = {(c) Copyright 1993 Association for Computing
               Machinery},
  keywords  = {User-expertise assessment, Probabilistic reasoning,
               Evidence aggregation, Entropy, Intelligent interfaces,
               Adaptive training systems, Knowledge spaces},
  abstract  = {An adaptive user interface relies, to a large extent,
               upon an adequate user model (e.g., a representation of
               user-expertise). However, building a user model may be
               a tedious and time consuming task that will render such
               an interface unattractive to developers. We thus need
               an effective means of inferring the user model at low
               cost. In this paper, we describe a technique for
               automatically inferring a fine-grain model of a user's
               knowledge state based on a small number of
               observations. With this approach, the domain of
               knowledge to be evaluated is represented as a network
               of nodes (knowledge units -- KU) and links
               (implications) induced from empirical user profiles.
               The user knowledge state is specified as a set of
               weights attached to the knowledge units that indicate
               the likelihood of mastery. These weights are updated
               every time a knowledge unit is reassigned a new weight
               (e.g., by a question-and-answer process). The updating
               scheme is based on the Dempster-Shafer algorithm. A
               User Knowledge Assessment Tool (UKAT) that employs this
               technique has been implemented. By way of simulations,
               we explore an entropy-based method of choosing
               questions, and compare the results with a random
               sampling method. The experimental results show that the
               proposed knowledge assessment and questioning methods
               are useful and efficient in inferring detailed models
               of user-expertise, but the entropy-based method can
               induce a bias in some circumstances.},
}
Found 40 references (maximum number of matches set to 40) in 29 bibliographies.
You used 35 seconds of our CPU time.