% Publicaciones (publications list)
% --- 2011 ---
% Ballejos & Montagna (2011), Requirements Engineering. Cleaned from Scopus/teachPress export:
% names in unambiguous "Last, First" form, page range with --, empty/exporter fields dropped.
@article{Ballejos2011281,
  author   = {Ballejos, L. C. and Montagna, J. M.},
  title    = {Modeling Stakeholders for Information Systems Design Processes},
  journal  = {Requirements Engineering},
  year     = {2011},
  volume   = {16},
  number   = {4},
  pages    = {281--296},
  doi      = {10.1007/s00766-011-0123-2},
  url      = {http://www.scopus.com/inward/record.url?eid=2-s2.0-80455155164&partnerID=40&md5=e8bbd0ce071809810976c74fdd31dc29},
  abstract = {In the software engineering area, stakeholders play a significant role in requirements elicitation and validation. Moreover, all the project management is integrally affected by stakeholders' perspectives and their participation. This effect is strengthened when projects involve several organizations. Thus, a clear and explicit representation of the stakeholders and their attributes is required in order to achieve their effective management. The integration of this representation with other models capturing the knowledge of engineering design processes can be of great utility in software development projects. In this sense, this article describes the construction of an integrated model for representing stakeholders in information systems design processes. This proposal considers diverse attributes related to stakeholders and gives information for performing quantitative calculations about their interest and influence over the project. Thus, more inclusive experiences of the information systems development can be supported, even more if contexts with the participation of several organizations are considered. {\copyright} 2011 Springer-Verlag London Limited.},
  note     = {cited By 7},
}
% Fumero, Corsano & Montagna (2011), Computer Aided Chemical Engineering 29. Cleaned Scopus export.
@article{Fumero20111416,
  author   = {Fumero, Y. and Corsano, G. and Montagna, J. M.},
  title    = {Simultaneous Design and Scheduling of a Plant for Producing Ethanol and Derivatives},
  journal  = {Computer Aided Chemical Engineering},
  year     = {2011},
  volume   = {29},
  pages    = {1416--1420},
  doi      = {10.1016/B978-0-444-54298-4.50062-3},
  url      = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79958814765&partnerID=40&md5=fefbf41bf1e1e0838525a7a5d0874ae6},
  abstract = {In this paper, a mixed integer linear programming model is formulated for the simultaneous design and scheduling of a semicontinuous/batch plant for producing ethanol and two types of yeast. Yeast productions emerge as a sustainable alternative for ethanol residues. The optimal plant configuration, unit sizes, number and size of batches in the campaign and its sequencing is obtained in order to fulfill the ethanol and yeast demands minimizing the investment cost. A novel set of scheduling constraints is proposed for this kind of plants. {\copyright} 2011 Elsevier B.V.},
  note     = {cited By 2},
}
% Fumero, Corsano & Montagna (2011), Ind. Eng. Chem. Res. 50(10). Cleaned Scopus export.
@article{Fumero20116146,
  author   = {Fumero, Y. and Corsano, G. and Montagna, J. M.},
  title    = {Detailed Design of Multiproduct Batch Plants Considering Production Scheduling},
  journal  = {Industrial and Engineering Chemistry Research},
  year     = {2011},
  volume   = {50},
  number   = {10},
  pages    = {6146--6160},
  doi      = {10.1021/ie1008376},
  url      = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79955909794&partnerID=40&md5=aab0c434e32d3cbfd3ee8af810a0268b},
  abstract = {Most previous approaches for the design of multiproduct batch plants have assumed the simplest scheduling policy. In order to simplify the formulation and take into account that many times demands are uncertain, they have used single product campaigns to determine the plant configuration and select the unit sizes. From the commercial point of view, this production mode is not realistic: for example, huge inventories should be kept to support this approach. However, when a stable context can be assured, the simultaneous resolution of design and a more detailed scheduling allows assessing different trade-offs. This article presents a new mixed integer linear programming (MILP) formulation assuming mixed product campaigns. Now, the composition and the sequence of the batches in the campaign must be determined as well as the assignment of batches to units when parallel units are used. Taking into account that the plant configuration is simultaneously obtained, the scheduling problem must be solved without knowing the number of available units and their sizes. Several examples are presented in order to show the performance of the proposed approach. {\copyright} 2011 American Chemical Society.},
  note     = {cited By 5},
}
% Corsano & Montagna (2011), Computers and Chemical Engineering 35(1). Cleaned Scopus export.
@article{Corsano2011149,
  author   = {Corsano, G. and Montagna, J. M.},
  title    = {Mathematical Modeling for Simultaneous Design of Plants and Supply Chain in the Batch Process Industry},
  journal  = {Computers and Chemical Engineering},
  year     = {2011},
  volume   = {35},
  number   = {1},
  pages    = {149--164},
  doi      = {10.1016/j.compchemeng.2010.06.008},
  url      = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78649456726&partnerID=40&md5=9d5d1ee13c96dc3eb6a743aaf2dfd599},
  abstract = {Most supply chain design models have focused on the integration problem, where links among nodes must be settled in order to allow an efficient operation of the whole system. At this level, all the problem elements are modeled like black boxes, and the optimal solution determines the nodes allocation and their capacity, and links among nodes. In this work, a new approach is proposed where decisions about plant design are simultaneously made with operational and planning decisions on the supply chain. Thus, tradeoffs between the plant structure and the network design are assessed. The model considers unit duplications and the allocation of storage tanks for plant design. Using different sets of discrete sizes for batch units and tanks, a mixed integer linear programming model (MILP) is attained. The proposed formulation is compared with other non-integrated approaches in order to illustrate the advantages of the presented simultaneous approach. {\copyright} 2010 Elsevier Ltd.},
  note     = {cited By 10},
}
% Moreno & Montagna (2011), AIChE Journal 57(1). Cleaned Scopus export.
@article{Moreno2011122,
  author   = {Moreno, M. S. and Montagna, J. M.},
  title    = {Multiproduct Batch Plants Design Using Linear Process Performance Models},
  journal  = {AIChE Journal},
  year     = {2011},
  volume   = {57},
  number   = {1},
  pages    = {122--135},
  doi      = {10.1002/aic.12248},
  url      = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78650039700&partnerID=40&md5=ef5cc7b587c22c8eb2b9a06e5fe6acd8},
  abstract = {In this contribution, a novel linear generalized disjunctive programming (LGDP) model is developed for the design of multiproduct batch plants optimizing both process variables and the structure of the plant through the use of process performance models. These models describe unit operations using explicit expressions for the size and time factors as functions of the process variables with the highest impact. To attain a linear formulation, values of the process variables as well as unit sizes are selected from a set of meaningful discrete values provided by the designer. Regarding structural alternatives, both kinds of unit duplications in series and in parallel are considered in this approach. The inclusion of the duplication in series requires different detailed models that depend on the structure selected. Thus, in a new approach for the multiproduct batch plant design, a set of potential structural alternatives for the plant is defined. {\copyright} 2010 American Institute of Chemical Engineers (AIChE).},
  note     = {cited By 1},
}
% Gonzalez, Adam, Marcovecchio & Odloak (2011), Journal of Process Control 21(10).
% Key renamed to ASCII (was "González20111493": non-ASCII keys break classic BibTeX toolchains);
% accented surname encoded as a BibTeX special character for correct sorting.
@article{Gonzalez20111493,
  author   = {Gonz{\'a}lez, A. H. and Adam, E. J. and Marcovecchio, M. G. and Odloak, D.},
  title    = {Application of an Extended {IHMPC} to an Unstable Reactor System: Study of Feasibility and Performance},
  journal  = {Journal of Process Control},
  year     = {2011},
  volume   = {21},
  number   = {10},
  pages    = {1493--1503},
  doi      = {10.1016/j.jprocont.2011.05.011},
  url      = {http://www.scopus.com/inward/record.url?eid=2-s2.0-80054722825&partnerID=40&md5=51a030458253976bb78bc3ed92461814},
  abstract = {Almost all the theoretical aspects of model predictive control (MPC), such as stability, recursive feasibility and even the optimality are now well established for both, the nominal and the robust case. The stability and recursive feasibility are usually guaranteed by means of additional terminal constraints, while the optimality is achieved considering closed-loop predictions. However, these significant improvements are not always applicable to real processes. An interesting case is the control of open-loop unstable reactor systems. There, the traditional infinite horizon MPC (IHMPC), which constitutes the simplest strategy ensuring stability, needs to include an additional terminal constraint to cancel the unstable modes, producing in this way feasibility problems. The terminal constraint could be an equality or an inclusion constraint, depending on the local controller assumed for predictions. In both cases, however, a prohibitive length of the control horizon is necessary to produce a reasonable domain of attraction for real applications. In this work, we study the application of an IHMPC formulation that has maximal domain of attraction (i.e., the domain of attraction is determined by the system and the constraints, and not by the controller) to an unstable reactor system. It is shown that the method is suitable for real applications in the sense that it accounts for the case of output tracking and it is offset free if the output target is reachable, and minimizes the offset if some of the constraints become active at steady-state. {\copyright} 2011 Elsevier Ltd. All rights reserved.},
  note     = {cited By 14},
}
% Marcovecchio, Novais & Grossmann (2011), Computer Aided Chemical Engineering 29. Cleaned Scopus export.
@article{Marcovecchio2011533,
  author   = {Marcovecchio, M. G. and Novais, A. Q. and Grossmann, I. E.},
  title    = {A Deterministic Optimization Approach for the Unit Commitment Problem},
  journal  = {Computer Aided Chemical Engineering},
  year     = {2011},
  volume   = {29},
  pages    = {533--536},
  doi      = {10.1016/B978-0-444-53711-9.50107-3},
  url      = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79958849333&partnerID=40&md5=a9a6fef303f90dc1998f5fd9de002cc6},
  abstract = {Reliable power production is critical to the profitability of electricity utilities. This concern, together with the need for less dependence on fossil fuels consumption and for CO2 mitigation, is leading to the prospective use of combined forms of conventional and alternative forms of energy generation as the most promising means to meet an increasing demand for electric power. Unit commitment (UC) arises in this context as a most critical decision process, involving a large number of interacting factors and underlying therefore a complex optimization problem. As such, the UC problem has been receiving a good deal of attention in the literature, with heuristic approaches being most dominant. As an alternative, a deterministic optimization approach is proposed in this paper and applied to the thermal UC problem. The model developed is a mixed integer quadratic programming problem (MIQP) having the objective of minimizing the fuel consumption (calculated by a quadratic function) and start up costs, with a strategy proposed for its solution that exploits the characteristics of the UC problem. This consists of valid integer cutting planes and a Branch and Bound (B\&B) search, which are developed and combined resulting in a Branch and Cut (B\&C) algorithm particular to the UC problem. The approach is described and implemented to solve a reference case study. Although the UC problem is NP-hard, the results show that the proposed technique is capable of providing the optimal solution for real-world sized instances. {\copyright} 2011 Elsevier B.V.},
  note     = {cited By 3},
}
% Gonzalez, Adam, Marcovecchio & Odloak (2011), Journal of Process Control 21(4).
% Key renamed to ASCII (was "González2011573"); accented surname as BibTeX special character.
@article{Gonzalez2011573,
  author   = {Gonz{\'a}lez, A. H. and Adam, E. J. and Marcovecchio, M. G. and Odloak, D.},
  title    = {Stable {MPC} for Tracking with Maximal Domain of Attraction},
  journal  = {Journal of Process Control},
  year     = {2011},
  volume   = {21},
  number   = {4},
  pages    = {573--584},
  doi      = {10.1016/j.jprocont.2011.01.002},
  url      = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79953813602&partnerID=40&md5=3ca6cf52ec97105e960af872b0f6ed36},
  abstract = {In this work, a stable MPC that maximizes the domain of attraction of the closed-loop system is proposed. The proposed approach is suitable to real applications in the sense that it accounts for the case of output tracking, it is offset free if the output target is reachable and minimizes the offset if some of the constraints are active at steady state. The new approach is based on the definition of a Minkowski functional related to the input and terminal constraints of the stable infinite horizon MPC. It is also shown that the domain of attraction is defined by the system model and the constraints, and it does not depend on the controller tuning parameters. The proposed controller is illustrated with small order examples of the control literature. {\copyright} 2011 Elsevier Ltd. All rights reserved.},
  note     = {cited By 10},
}
% Bogado, Gonnet & Leone (2011), SCCC proceedings.
% @conference is a legacy alias for @inproceedings; the proceedings title belongs in
% booktitle, not journal (required field for this entry type).
@inproceedings{Bogado2011110,
  author    = {Bogado, V. and Gonnet, S. and Leone, H. P.},
  title     = {An Approach Based on {DEVS} for Evaluating Quality Attributes},
  booktitle = {Proceedings - International Conference of the Chilean Computer Science Society, SCCC},
  year      = {2011},
  pages     = {110--118},
  doi       = {10.1109/SCCC.2010.13},
  url       = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79955933291&partnerID=40&md5=f388d2888dbc7dca8b6af16f7e03a2c2},
  abstract  = {The present work provides a novel approach for the simulation of software at early stage of its development, using its architecture. For this purpose, DEVS formalism is used to introduce the discrete event simulation advantages in the context of software architecture. DEVS provides a modular and hierarchical way to build blocks in a simulation model, which fits naturally to architectural concepts. The proposal supports the transformation of architecture elements into simulation elements, decoupling the model from the simulator. The goal is to acquire quantitative information for evaluating the quality of the system at early stage, bringing support to make decisions. {\copyright} 2010 IEEE.},
  note      = {cited By 0},
}
% --- 2010 ---
% Arredondo & Martinez (2010), Computers and Industrial Engineering 58(1). Cleaned Scopus export.
@article{Arredondo201070,
  author   = {Arredondo, F. and Mart{\'\i}nez, E. C.},
  title    = {Learning and Adaptation of a Policy for Dynamic Order Acceptance in Make-to-Order Manufacturing},
  journal  = {Computers and Industrial Engineering},
  year     = {2010},
  volume   = {58},
  number   = {1},
  pages    = {70--83},
  doi      = {10.1016/j.cie.2009.08.005},
  url      = {http://www.scopus.com/inward/record.url?eid=2-s2.0-72049095063&partnerID=40&md5=6be8ec588142cf9270c5e3442d874fb2},
  abstract = {Order acceptance under uncertainty is a critical decision-making problem at the interface between customer relationship management and production planning of order-driven manufacturing systems. In this work, a novel approach for simulation-based development and on-line adaptation of a policy for dynamic order acceptance under uncertainty in make-to-order manufacturing using average-reward reinforcement learning is proposed. Locally weighted regression is used to generalize the gain value of accepting or rejecting similar orders regarding attributes such as product mix, price, size and due date. The order acceptance policy is learned by classifying an arriving order as belonging either to the acceptance set or to the rejection set. For exploitation, only orders in the acceptance set must be chosen for shop-floor scheduling. For exploration some orders from the rejection set are also considered as candidates for acceptance. Comparisons made with different order acceptance heuristics highlight the effectiveness of the proposed ARLOA algorithm to maximize the average revenue obtained per unit cost of installed capacity whilst quickly responding to unknown variations in order arrival rates and attributes. {\copyright} 2009 Elsevier Ltd. All rights reserved.},
  note     = {cited By 17},
}
% Cristaldi, Cristea & Martinez (2010), Computer Aided Chemical Engineering 28(C).
% Abstract typos fixed: "sensivity" -> "sensitivity", "boostrapping" -> "bootstrapping".
@article{Cristaldi2010925,
  author   = {Cristaldi, M. and Cristea, S. and Mart{\'\i}nez, E. C.},
  title    = {Run-to-Run Convergence Analysis of Model-Based Policy Iteration Algorithms for Experimental Optimization of Batch Processes},
  journal  = {Computer Aided Chemical Engineering},
  year     = {2010},
  volume   = {28},
  number   = {C},
  pages    = {925--930},
  doi      = {10.1016/S1570-7946(10)28155-5},
  url      = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78651453265&partnerID=40&md5=cbd7de87834009501bd83d619872811e},
  abstract = {Convergence analysis of iterative identification-optimization schemes is a key issue in modeling for optimization of batch processes. In this work, it is formally shown that for convergence is sufficient to guarantee that parametric uncertainty is increasingly reduced on a run-to-run basis. Convergence of a policy iteration algorithm to an optimal policy which satisfies the Hamilton-Jacobi-Bellman equation is thus assured as long as parametric uncertainty is iteratively reduced such that the performance prediction mismatch is driven to zero. The integration of global sensitivity analysis with confidence interval bootstrapping in the design of a convergent algorithm for model-based policy iteration is proposed. A simple bioprocess is used to exemplify run-to-run improvement. {\copyright} 2010 Elsevier B.V. All rights reserved.},
  note     = {cited By 0},
}
E.C. Martínez, F. Arredondo Order acceptance for revenue management and capacity allocation in make-to-order batch plants (Artículo de revista) Computer Aided Chemical Engineering, 28 (C), pp. 1189-1194, 2010, (cited By 2). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Martínez20101189, title = {Order acceptance for revenue management and capacity allocation in make-to-order batch plants}, author = { E.C. Martínez and F. Arredondo}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78651463182&partnerID=40&md5=ba214c607258c05fd1351787bc6f6930}, doi = {10.1016/S1570-7946(10)28199-3}, year = {2010}, date = {2010-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {28}, number = {C}, pages = {1189-1194}, abstract = {Challenging issues for optimal capacity allocation in make-to-order (MTO) batch plants are fixed manufacturing capacity and a highly diversified product portfolio compounded with pronounced fluctuations in demand and profitability. In this work, revenue management is carried out by maximizing profits under uncertainty in MTO production systems using an intelligent decision rule to dynamically control the inflow of orders. A novel approach for learning and update of an order acceptance policy using data is proposed. Comparisons made with threshold heuristics for capacity control in a multiproduct batch plant highlight superior performance of the dynamic order admission policy resulting from selective order acceptance using revenue management. © 2010 Elsevier B.V. All rights reserved.}, note = {cited By 2}, keywords = {}, pubstate = {published}, tppubtype = {article} } Challenging issues for optimal capacity allocation in make-to-order (MTO) batch plants are fixed manufacturing capacity and a highly diversified product portfolio compounded with pronounced fluctuations in demand and profitability. 
In this work, revenue management is carried out by maximizing profits under uncertainty in MTO production systems using an intelligent decision rule to dynamically control the inflow of orders. A novel approach for learning and update of an order acceptance policy using data is proposed. Comparisons made with threshold heuristics for capacity control in a multiproduct batch plant highlight superior performance of the dynamic order admission policy resulting from selective order acceptance using revenue management. © 2010 Elsevier B.V. All rights reserved. |
J. Palombarini, E.C. Martínez Learning to repair plans and schedules using a relational (deictic) representation (Artículo de revista) Brazilian Journal of Chemical Engineering, 27 (3), pp. 413-427, 2010, (cited By 2). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Palombarini2010413, title = {Learning to repair plans and schedules using a relational (deictic) representation}, author = { J. Palombarini and E.C. Martínez}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78650102897&partnerID=40&md5=dc8831d8a3b9953eab6acdce1094fd42}, year = {2010}, date = {2010-01-01}, journal = {Brazilian Journal of Chemical Engineering}, volume = {27}, number = {3}, pages = {413-427}, abstract = {Unplanned and abnormal events may have a significant impact on the feasibility of plans and schedules which requires to repair them 'on-the-fly' to guarantee due date compliance of orders-in-progress and negotiating delivery conditions for new orders. In this work, a repair-based rescheduling approach based on the integration of intensive simulations with logical and relational reinforcement learning is proposed. Based on a relational (deictic) representation of schedule states, a number of repair operators have been designed to guide the search towards a goal state. The knowledge generated via simulation is encoded in a relational regression tree for the Q-value function defining the utility of applying a given repair operator at a given schedule state. A prototype implementation in Prolog language is discussed using a representative example of three batch extruders processing orders for four different products. 
The learning curve for the problem of inserting a new order vividly illustrates the advantages of logical and relational learning in rescheduling.}, note = {cited By 2}, keywords = {}, pubstate = {published}, tppubtype = {article} } Unplanned and abnormal events may have a significant impact on the feasibility of plans and schedules, which requires repairing them 'on-the-fly' to guarantee due date compliance of orders-in-progress and to negotiate delivery conditions for new orders. In this work, a repair-based rescheduling approach based on the integration of intensive simulations with logical and relational reinforcement learning is proposed. Based on a relational (deictic) representation of schedule states, a number of repair operators have been designed to guide the search towards a goal state. The knowledge generated via simulation is encoded in a relational regression tree for the Q-value function defining the utility of applying a given repair operator at a given schedule state. A prototype implementation in Prolog language is discussed using a representative example of three batch extruders processing orders for four different products. The learning curve for the problem of inserting a new order vividly illustrates the advantages of logical and relational learning in rescheduling. |
E. Fernández, H.E. Salomone, O.J. Chiotti Model based on Bayesian networks for monitoring events in a supply chain (Artículo de revista) IFIP Advances in Information and Communication Technology, 338 AICT , pp. 358-365, 2010, (cited By 1). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Fernández2010358, title = {Model based on Bayesian networks for monitoring events in a supply chain}, author = { E. Fernández and H.E. Salomone and O.J. Chiotti}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78649933028&partnerID=40&md5=c277aa8803f38bd97616fb55d0269baf}, doi = {10.1007/978-3-642-16358-6_45}, year = {2010}, date = {2010-01-01}, journal = {IFIP Advances in Information and Communication Technology}, volume = {338 AICT}, pages = {358-365}, abstract = {The execution of supply process orders in a supply chain is conditioned by different types of disruptive events that must be detected and solved in real time. This requires the ability to proactively monitor, analyze and notify disruptive events. In this work we present a model that captures this functionality and was used as the foundation to design a software agent. A reactive-deliberative hybrid architecture provides the ability to proactively detect, analyze and notify disruptive events that take place in a supply chain. For the deliberative performance of the agent, a cause-effect relation model based on a Bayesian network with decision nodes is proposed. © 2010 IFIP International Federation for Information Processing.}, note = {cited By 1}, keywords = {}, pubstate = {published}, tppubtype = {article} } The execution of supply process orders in a supply chain is conditioned by different types of disruptive events that must be detected and solved in real time. This requires the ability to proactively monitor, analyze and notify disruptive events. In this work we present a model that captures this functionality and was used as the foundation to design a software agent. 
A reactive-deliberative hybrid architecture provides the ability to proactively detect, analyze and notify disruptive events that take place in a supply chain. For the deliberative performance of the agent, a cause-effect relation model based on a Bayesian network with decision nodes is proposed. © 2010 IFIP International Federation for Information Processing. |
E. Fernández, H.E. Salomone, O.J. Chiotti Compound web service for supply processes monitoring to anticipate disruptive event (Artículo de revista) IFIP Advances in Information and Communication Technology, 341 AICT , pp. 51-60, 2010, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Fernández201051, title = {Compound web service for supply processes monitoring to anticipate disruptive event}, author = { E. Fernández and H.E. Salomone and O.J. Chiotti}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84862536966&partnerID=40&md5=37bc9e6b6b4076f24ec3cd1ab63ad3e7}, doi = {10.1007/978-3-642-16283-1_9}, year = {2010}, date = {2010-01-01}, journal = {IFIP Advances in Information and Communication Technology}, volume = {341 AICT}, pages = {51-60}, abstract = {The execution of supply process orders in a supply chain is conditioned by different types of disruptive events that must be detected and solved in real time. In this work we present a compound web service that performs the monitoring and notification functions of a supply chain event management system. This web service is designed based on a reference model that we have proposed to improve the event management activity through a deeper analysis of the occurrence and causality of events, leading to anticipate an exception during the execution of a supply process order. The web service composition is defined based on business processes. The ability to proactively detect, analyze and notify disruptive events is given through of a Bayesian network with decision nodes. © 2010 IFIP International Federation for Information Processing.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {article} } The execution of supply process orders in a supply chain is conditioned by different types of disruptive events that must be detected and solved in real time. 
In this work we present a compound web service that performs the monitoring and notification functions of a supply chain event management system. This web service is designed based on a reference model that we have proposed to improve the event management activity through a deeper analysis of the occurrence and causality of events, making it possible to anticipate an exception during the execution of a supply process order. The web service composition is defined based on business processes. The ability to proactively detect, analyze and notify disruptive events is given through a Bayesian network with decision nodes. © 2010 IFIP International Federation for Information Processing. |
I.M. Lazarte, E. Tello-Leal, J. Roa, O.J. Chiotti, P.D. Villarreal Model-driven development methodology for B2B collaborations (Conferencia) 2010, (cited By 4). (Resumen | Enlaces | BibTeX | Etiquetas: ) @conference{Lazarte201069, title = {Model-driven development methodology for B2B collaborations}, author = { I.M. Lazarte and E. Tello-Leal and J. Roa and O.J. Chiotti and P.D. Villarreal}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79951940564&partnerID=40&md5=03aef34b2f5ce5d9776ea05844167e68}, doi = {10.1109/EDOCW.2010.21}, year = {2010}, date = {2010-01-01}, journal = {Proceedings - IEEE International Enterprise Distributed Object Computing Workshop, EDOC}, pages = {69-78}, abstract = {The design and implementation of collaborative business processes and the Business-to-Business (B2B) systems that support them is an important issue in order to enable enterprises to set up B2B collaborations. This involves new challenges, mainly regarding the ability to cope with change, decentralized management, peer-to-peer interactions, preservation of enterprise autonomy, and the support for interoperability. The design and implementation of B2B collaborations require the use of conceptual models that differ in viewpoint, target people, abstraction level and granularity. This makes the use of traditional software development methodologies inappropriate. In this paper, we propose a methodology for the design and implementation of B2B collaborations that support the above issues. The methodology supports a development framework and is based on the Model-Driven Development (MDD). 
The methodology provides guidelines, languages, methods, model transformations and tools to support the representation of business requirements, the definition of technology- independent collaborative process models, the derivation of technology- independent processes and IT architectures that enterprises require to support collaborative processes as well as the generation of a technology-specific solution for each enterprise. This methodology guarantees the alignment and consistency between the business and technological solutions for B2B collaborations. © 2010 IEEE.}, note = {cited By 4}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The design and implementation of collaborative business processes and the Business-to-Business (B2B) systems that support them is an important issue in order to enable enterprises to set up B2B collaborations. This involves new challenges, mainly regarding the ability to cope with change, decentralized management, peer-to-peer interactions, preservation of enterprise autonomy, and the support for interoperability. The design and implementation of B2B collaborations require the use of conceptual models that differ in viewpoint, target people, abstraction level and granularity. This makes the use of traditional software development methodologies inappropriate. In this paper, we propose a methodology for the design and implementation of B2B collaborations that support the above issues. The methodology supports a development framework and is based on the Model-Driven Development (MDD). The methodology provides guidelines, languages, methods, model transformations and tools to support the representation of business requirements, the definition of technology- independent collaborative process models, the derivation of technology- independent processes and IT architectures that enterprises require to support collaborative processes as well as the generation of a technology-specific solution for each enterprise. 
This methodology guarantees the alignment and consistency between the business and technological solutions for B2B collaborations. © 2010 IEEE. |
E. Tello-Leal, O.J. Chiotti, P.D. Villarreal An agent-based B2B collaboration platform for executing collaborative business processes (Artículo de revista) IFIP Advances in Information and Communication Technology, 341 AICT , pp. 40-50, 2010, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Tello-Leal201040, title = {An agent-based B2B collaboration platform for executing collaborative business processes}, author = { E. Tello-Leal and O.J. Chiotti and P.D. Villarreal}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84862657017&partnerID=40&md5=866db3ca6acebf58d58ed04f5e255e46}, doi = {10.1007/978-3-642-16283-1_8}, year = {2010}, date = {2010-01-01}, journal = {IFIP Advances in Information and Communication Technology}, volume = {341 AICT}, pages = {40-50}, abstract = {Nowadays, organizations establish Business-to-Business (B2B) collaborations with their business partners. Inter-organizational collaboration is carried out through the execution of collaborative business processes. Organizations are requiring and undergoing the setting up of dynamic B2B collaborations, instead of conducting face-to-face negotiations and agreements for executing collaborative processes. This implies that business partners, maybe without a previous relationship, agree dynamically on the execution of collaborative processes based on predefined models of these processes. In this work, we propose an B2B collaboration platform which provides agent-based systems and interaction mechanisms in order to enable organizations to establish dynamic agreements with their partners and carry out the decentralized execution of collaborative processes. Agents use models of collaborative rocesses to enact them in a dynamic way. The role an organization performs in a collaborative process is translated into a Petri Net model that a collaboration agent interpret to execute the process. 
© 2010 IFIP International Federation for Information Processing.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {article} } Nowadays, organizations establish Business-to-Business (B2B) collaborations with their business partners. Inter-organizational collaboration is carried out through the execution of collaborative business processes. Organizations are requiring and undergoing the setting up of dynamic B2B collaborations, instead of conducting face-to-face negotiations and agreements for executing collaborative processes. This implies that business partners, maybe without a previous relationship, agree dynamically on the execution of collaborative processes based on predefined models of these processes. In this work, we propose a B2B collaboration platform which provides agent-based systems and interaction mechanisms in order to enable organizations to establish dynamic agreements with their partners and carry out the decentralized execution of collaborative processes. Agents use models of collaborative processes to enact them in a dynamic way. The role an organization performs in a collaborative process is translated into a Petri Net model that a collaboration agent interprets to execute the process. © 2010 IFIP International Federation for Information Processing. |
A. Guarnaschelli, O.J. Chiotti, H.E. Salomone Service oriented approach for autonomous exception management in supply chains (Artículo de revista) IFIP Advances in Information and Communication Technology, 341 AICT , pp. 292-303, 2010, (cited By 2). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Guarnaschelli2010292, title = {Service oriented approach for autonomous exception management in supply chains}, author = { A. Guarnaschelli and O.J. Chiotti and H.E. Salomone}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84862323629&partnerID=40&md5=6a5c2b1b20627ece15c3b5e131459782}, doi = {10.1007/978-3-642-16283-1_32}, year = {2010}, date = {2010-01-01}, journal = {IFIP Advances in Information and Communication Technology}, volume = {341 AICT}, pages = {292-303}, abstract = {Risk and uncertainty are inherent to Supply Chains; at the execution level unexpected events can disrupt the normal flow of supply processes creating a gap between planned operations and what is actually executed. These disruptions increment rescheduling frequency, generating reconfiguration costs and system's nervousness. This work proposes a web service based Business Process to support Autonomous Exception Management in Supply chains. © 2010 IFIP International Federation for Information Processing.}, note = {cited By 2}, keywords = {}, pubstate = {published}, tppubtype = {article} } Risk and uncertainty are inherent to Supply Chains; at the execution level unexpected events can disrupt the normal flow of supply processes creating a gap between planned operations and what is actually executed. These disruptions increment rescheduling frequency, generating reconfiguration costs and system's nervousness. This work proposes a web service based Business Process to support Autonomous Exception Management in Supply chains. © 2010 IFIP International Federation for Information Processing. |
M. Rubiolo, G. Stegmayer, O.J. Chiotti Neural networks and wireless communications modeling (Artículo de revista) IEEE Latin America Transactions, 8 (5), pp. 486-492, 2010, (cited By 3). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Rubiolo2010486, title = {Neural networks and wireless communications modeling}, author = { M. Rubiolo and G. Stegmayer and O.J. Chiotti}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79955487927&partnerID=40&md5=a844f8719d29ac62d0d5f32ef08ccf73}, doi = {10.1109/TLA.2010.5623499}, year = {2010}, date = {2010-01-01}, journal = {IEEE Latin America Transactions}, volume = {8}, number = {5}, pages = {486-492}, abstract = {This paper presents a free software tool that supports the next-generation Mobile Communications, through the automatic generation of models of components and electronic devices based on neural networks. This tool enables the creation, training, validation and simulation of the model directly from measurements made on devices of interest, using an interface totally oriented to non-experts in neural models. The resulting model can be exported automatically to a traditional circuit simulator to test different scenarios.}, note = {cited By 3}, keywords = {}, pubstate = {published}, tppubtype = {article} } This paper presents a free software tool that supports the next-generation Mobile Communications, through the automatic generation of models of components and electronic devices based on neural networks. This tool enables the creation, training, validation and simulation of the model directly from measurements made on devices of interest, using an interface totally oriented to non-experts in neural models. The resulting model can be exported automatically to a traditional circuit simulator to test different scenarios. |
P.D. Villarreal, I. Lazarte, J. Roa, O.J. Chiotti A modeling approach for collaborative business processes based on the UP-ColBPIP language (Artículo de revista) Lecture Notes in Business Information Processing, 43 LNBIP , pp. 318-329, 2010, (cited By 5). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Villarreal2010318, title = {A modeling approach for collaborative business processes based on the UP-ColBPIP language}, author = { P.D. Villarreal and I. Lazarte and J. Roa and O.J. Chiotti}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77953975380&partnerID=40&md5=732f2bba8aa57b5fe695e261819646bb}, doi = {10.1007/978-3-642-12186-9_30}, year = {2010}, date = {2010-01-01}, journal = {Lecture Notes in Business Information Processing}, volume = {43 LNBIP}, pages = {318-329}, abstract = {The modeling of collaborative business processes is an important issue in order to allow enterprises to implement B2B collaborations with their business partners. We have proposed an MDA-based methodology for the modeling, verification and implementation of collaborative processes. Since collaborative process models are the main artifacts in this MDA-based methodology, a suitable modeling approach is required to design collaborative processes. In this work we describe a modeling approach for collaborative processes based on the UP-ColBPIP language, which is oriented to support the model-driven development of collaborative processes and B2B information systems. The behavior of collaborative processes is modeled through interaction protocols. Enhances to the control flow constructors of interaction protocols are introduced. In addition, we describe an Eclipse-based tool that supports this language. © 2010 Springer-Verlag.}, note = {cited By 5}, keywords = {}, pubstate = {published}, tppubtype = {article} } The modeling of collaborative business processes is an important issue in order to allow enterprises to implement B2B collaborations with their business partners. 
We have proposed an MDA-based methodology for the modeling, verification and implementation of collaborative processes. Since collaborative process models are the main artifacts in this MDA-based methodology, a suitable modeling approach is required to design collaborative processes. In this work we describe a modeling approach for collaborative processes based on the UP-ColBPIP language, which is oriented to support the model-driven development of collaborative processes and B2B information systems. The behavior of collaborative processes is modeled through interaction protocols. Enhancements to the control flow constructors of interaction protocols are introduced. In addition, we describe an Eclipse-based tool that supports this language. © 2010 Springer-Verlag. |
M. Vegetti, H.P. Leone, G.P. Henning A three level abstraction hierarchy to represent product structural information (Conferencia) 2 AIDSS , 2010, (cited By 1). (Resumen | Enlaces | BibTeX | Etiquetas: ) @conference{Vegetti2010299, title = {A three level abstraction hierarchy to represent product structural information}, author = { M. Vegetti and H.P. Leone and G.P. Henning}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78649811836&partnerID=40&md5=c778200fcf6cfc4ad8c5d733b554d5c4}, year = {2010}, date = {2010-01-01}, journal = {ICEIS 2010 - Proceedings of the 12th International Conference on Enterprise Information Systems}, volume = {2 AIDSS}, pages = {299-308}, abstract = {Product models should integrate and efficiently manage all the information associated with products in the context of industrial enterprises or supply chains (SCs). Nowadays, it is quite common for an organization and even, each area within a company, to have its own product model. This situation leads to information duplication and its associated problems. In addition, traditional product models do not properly handle the high number of variants managed in today competitive markets. Therefore, there is a need for an integrated product model to be shared by the organizations participating in global SCs or all areas within a company. One way to reach an intelligent integration among product models is by means of an ontology. PRONTO (PRoduct ONTOlogy) is an ontology for the Product Modelling domain, able to efficiently handle product variants. This contribution presents a ConceptBase formalization of PRONTO, as well as an extension of it that allows the inference of product structural knowledge and the specification of valid products.}, note = {cited By 1}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Product models should integrate and efficiently manage all the information associated with products in the context of industrial enterprises or supply chains (SCs). 
Nowadays, it is quite common for an organization, and even each area within a company, to have its own product model. This situation leads to information duplication and its associated problems. In addition, traditional product models do not properly handle the high number of variants managed in today's competitive markets. Therefore, there is a need for an integrated product model to be shared by the organizations participating in global SCs or all areas within a company. One way to reach an intelligent integration among product models is by means of an ontology. PRONTO (PRoduct ONTOlogy) is an ontology for the Product Modelling domain, able to efficiently handle product variants. This contribution presents a ConceptBase formalization of PRONTO, as well as an extension of it that allows the inference of product structural knowledge and the specification of valid products. |
C.E. Alvez, A.R. Vecchietti Combining semantic and content based image retrieval in ORDBMS (Artículo de revista) Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 6277 LNAI (PART 2), pp. 44-53, 2010, (cited By 2). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Alvez201044, title = {Combining semantic and content based image retrieval in ORDBMS}, author = { C.E. Alvez and A.R. Vecchietti}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78449248786&partnerID=40&md5=bd685e050b61c9f063781341c0975091}, doi = {10.1007/978-3-642-15390-7_5}, year = {2010}, date = {2010-01-01}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {6277 LNAI}, number = {PART 2}, pages = {44-53}, abstract = {In this article, an architecture for image retrieval in an Object-Relational Database Management System is proposed. It combines the use of low-level descriptors and semantic metadata for similarity search. The architecture has three levels: content-based, semantic data and an interface integrating them. Several database User Defined Types (UDT) and operations are defined for that purpose. A case study about vehicles is implemented and results obtained show an important improvement in image similarity search. © 2010 Springer-Verlag.}, note = {cited By 2}, keywords = {}, pubstate = {published}, tppubtype = {article} } In this article, an architecture for image retrieval in an Object-Relational Database Management System is proposed. It combines the use of low-level descriptors and semantic metadata for similarity search. The architecture has three levels: content-based, semantic data and an interface integrating them. Several database User Defined Types (UDT) and operations are defined for that purpose. 
A case study about vehicles is implemented and results obtained show an important improvement in image similarity search. © 2010 Springer-Verlag. |
M.A. Rodriguez, A.R. Vecchietti Inventory and delivery optimization under seasonal demand in the supply chain (Artículo de revista) Computers and Chemical Engineering, 34 (10), pp. 1705-1718, 2010, (cited By 6). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Rodriguez20101705, title = {Inventory and delivery optimization under seasonal demand in the supply chain}, author = { M.A. Rodriguez and A.R. Vecchietti}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77956192365&partnerID=40&md5=6368877d07e009d3e137af5a2da3bd35}, doi = {10.1016/j.compchemeng.2010.02.009}, year = {2010}, date = {2010-01-01}, journal = {Computers and Chemical Engineering}, volume = {34}, number = {10}, pages = {1705-1718}, abstract = {This work deals with the inventory, purchase and delivery optimization problem in the supply chain. The formulation of two problems is presented involving several decision levels. The first one optimizes the company inventory and purchase tasks in a medium-term horizon planning, assuming that the total amount purchased is delivered at the beginning of each period. Then, in a more detailed formulation, the purchased amount is distributed among several deliveries giving rise to a non-linear non-convex problem. Some transformation techniques are evaluated to overcome the non-convexities in order to find a global solution in a reasonable execution time. Finally, the results obtained considering some possible scenarios are analyzed and compared. © Elsevier Ltd.}, note = {cited By 6}, keywords = {}, pubstate = {published}, tppubtype = {article} } This work deals with the inventory, purchase and delivery optimization problem in the supply chain. The formulation of two problems is presented involving several decision levels. The first one optimizes the company inventory and purchase tasks in a medium-term horizon planning, assuming that the total amount purchased is delivered at the beginning of each period. 
Then, in a more detailed formulation, the purchased amount is distributed among several deliveries giving rise to a non-linear non-convex problem. Some transformation techniques are evaluated to overcome the non-convexities in order to find a global solution in a reasonable execution time. Finally, the results obtained considering some possible scenarios are analyzed and compared. © Elsevier Ltd. |
M.L. Roldán, S. Gonnet, H.P. Leone TracED: A tool for capturing and tracing engineering design processes (Artículo de revista) Advances in Engineering Software, 41 (9), pp. 1087-1109, 2010, (cited By 9). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Roldán20101087, title = {TracED: A tool for capturing and tracing engineering design processes}, author = { M.L. Roldán and S. Gonnet and H.P. Leone}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78049450788&partnerID=40&md5=611548f36790eae12388e3752878e623}, doi = {10.1016/j.advengsoft.2010.06.006}, year = {2010}, date = {2010-01-01}, journal = {Advances in Engineering Software}, volume = {41}, number = {9}, pages = {1087-1109}, abstract = {The design of products or production processes in many engineering disciplines such as chemical, or software engineering, involves many creative and sometimes unstructured activities, with an opportunistic control flow. During these design processes, several models are generated, which have different levels of abstraction of the object being designed. Given the difficulties in dealing with this complexity using an improvised way, there is an urgent need for tools that support the capture and tracing of this process. In this proposal, TracED, a computational environment to support the capture and tracing of engineering design process is presented. It allows defining a particular engineering design domain and supporting the capture of how products under development are transformed along an engineering design process. Particularly, in this work, we consider software architectures design domain. As in any complex process, the support of computational tools is required for enabling its capture. © 2010 Elsevier Ltd. 
All rights reserved.}, note = {cited By 9}, keywords = {}, pubstate = {published}, tppubtype = {article} } The design of products or production processes in many engineering disciplines such as chemical, or software engineering, involves many creative and sometimes unstructured activities, with an opportunistic control flow. During these design processes, several models are generated, which have different levels of abstraction of the object being designed. Given the difficulties in dealing with this complexity using an improvised way, there is an urgent need for tools that support the capture and tracing of this process. In this proposal, TracED, a computational environment to support the capture and tracing of engineering design process is presented. It allows defining a particular engineering design domain and supporting the capture of how products under development are transformed along an engineering design process. Particularly, in this work, we consider software architectures design domain. As in any complex process, the support of computational tools is required for enabling its capture. © 2010 Elsevier Ltd. All rights reserved. |
N. Alasino, M.C. Mussati, N.J. Scenna, P.A. Aguirre Wastewater treatment plant synthesis and design: Combined biological nitrogen and phosphorus removal (Artículo de revista) Industrial and Engineering Chemistry Research, 49 (18), pp. 8601-8612, 2010, (cited By 3). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Alasino20108601, title = {Wastewater treatment plant synthesis and design: Combined biological nitrogen and phosphorus removal}, author = { N. Alasino and M.C. Mussati and N.J. Scenna and P.A. Aguirre}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77956417010&partnerID=40&md5=12bd1a5faa43d71262510ec04f8fbac2}, doi = {10.1021/ie1000482}, year = {2010}, date = {2010-01-01}, journal = {Industrial and Engineering Chemistry Research}, volume = {49}, number = {18}, pages = {8601-8612}, abstract = {In the present work, a previous superstructure model developed for simultaneous optimization of the process configuration and equipment dimensions, i.e., optimal process synthesis and design and the operation conditions of activated sludge wastewater treatment plants, is extended to account for phosphorus as well as nitrogen removal. Along the activated sludge treatment process, the wastewater stream is exposed to different environmental conditions (anaerobic, anoxic, and aerated zones) in order to facilitate the different microbiological processes such as the release and uptake of phosphorus and the nitrification/denitrification processes. The Activated Sludge Model No. 3 extended with the Bio-P module for computing biological phosphorus removal is used to model the reaction compartments and the Takàcs model for representing the secondary settler. The performance criterion selected is the minimization of the net present value that includes investment and operating costs while verifying compliance with the effluent permitted limits. The problem is posed as a NLP problem, specifically a nonlinear programming problem with discontinuous derivatives DNLP. 
The optimization model is implemented and solved using a General Algebraic Modeling System, GAMS. Optimal configurations and designs obtained for several case studies are reported and discussed. The model itself and the resolution methodology prove to be robust and flexible enough to solve efficiently scenarios with a wide range of operation conditions, embedding conventional and nonconventional process configurations. © 2010 American Chemical Society.}, note = {cited By 3}, keywords = {}, pubstate = {published}, tppubtype = {article} } In the present work, a previous superstructure model developed for simultaneous optimization of the process configuration and equipment dimensions, i.e., optimal process synthesis and design and the operation conditions of activated sludge wastewater treatment plants, is extended to account for phosphorus as well as nitrogen removal. Along the activated sludge treatment process, the wastewater stream is exposed to different environmental conditions (anaerobic, anoxic, and aerated zones) in order to facilitate the different microbiological processes such as the release and uptake of phosphorus and the nitrification/denitrification processes. The Activated Sludge Model No. 3 extended with the Bio-P module for computing biological phosphorus removal is used to model the reaction compartments and the Takàcs model for representing the secondary settler. The performance criterion selected is the minimization of the net present value that includes investment and operating costs while verifying compliance with the effluent permitted limits. The problem is posed as a NLP problem, specifically a nonlinear programming problem with discontinuous derivatives DNLP. The optimization model is implemented and solved using a General Algebraic Modeling System, GAMS. Optimal configurations and designs obtained for several case studies are reported and discussed. 
The model itself and the resolution methodology prove to be robust and flexible enough to solve efficiently scenarios with a wide range of operation conditions, embedding conventional and nonconventional process configurations. © 2010 American Chemical Society. |
M. Fuentes, O.A. Iribarren, M.C. Mussati, N. Scenna, P.A. Aguirre Modeling and optimization of biological sequential batch reactors (Artículo de revista) Computer Aided Chemical Engineering, 28 (C), pp. 295-300, 2010, (cited By 1). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Fuentes2010295, title = {Modeling and optimization of biological sequential batch reactors}, author = { M. Fuentes and O.A. Iribarren and M.C. Mussati and N. Scenna and P.A. Aguirre}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77955189057&partnerID=40&md5=0e441b9e6516f08c79b2329cc0728130}, doi = {10.1016/S1570-7946(10)28050-1}, year = {2010}, date = {2010-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {28}, number = {C}, pages = {295-300}, abstract = {This work deals with modeling and operation optimization of biological sequential batch reactors (SBR). The SBR is a fill-and-draw biological sludge system for wastewater treatment. In this system, wastewater is added to a single batch reactor, treated to remove undesirable components, and then, discharged. In this paper, a global model of a gas-solid-liquid SBR is presented to investigate and optimize operational strategies. The model can address the differences between aerated and anaerobic systems by assigning adequate parameter values related to the aeration and reaction systems. Fluctuating operation conditions during cycles such as disturbances in the organic loading rate, stirring rate and cycle time, result in strong numerical discontinuities that can be included in the simulation schedules. An existing set of experimental data is used to show a model application based on an anaerobic SBR. A good agreement was obtained between experimental and predicted values. Optimization results are based on minimizing the reaction time/total cycle time ratio subjected to path pH constraints and interior-and end-point constraints related to the pollutant removal efficiency and settling conditions. 
A decrease of 22% in the total cycle time, i.e. an increase in the organic loading rate from 787 to 985 mg dm⁻³ d⁻¹ is reached without modifying the quality of effluent. © 2010 Elsevier B.V.}, note = {cited By 1}, keywords = {}, pubstate = {published}, tppubtype = {article} } This work deals with modeling and operation optimization of biological sequential batch reactors (SBR). The SBR is a fill-and-draw biological sludge system for wastewater treatment. In this system, wastewater is added to a single batch reactor, treated to remove undesirable components, and then, discharged. In this paper, a global model of a gas-solid-liquid SBR is presented to investigate and optimize operational strategies. The model can address the differences between aerated and anaerobic systems by assigning adequate parameter values related to the aeration and reaction systems. Fluctuating operation conditions during cycles such as disturbances in the organic loading rate, stirring rate and cycle time, result in strong numerical discontinuities that can be included in the simulation schedules. An existing set of experimental data is used to show a model application based on an anaerobic SBR. A good agreement was obtained between experimental and predicted values. Optimization results are based on minimizing the reaction time/total cycle time ratio subjected to path pH constraints and interior- and end-point constraints related to the pollutant removal efficiency and settling conditions. A decrease of 22% in the total cycle time, i.e. an increase in the organic loading rate from 787 to 985 mg dm⁻³ d⁻¹ is reached without modifying the quality of effluent. © 2010 Elsevier B.V. |
M.G. Marcovecchio, N.J. Scenna, P.A. Aguirre Improvements of a hollow fiber reverse osmosis desalination model: Analysis of numerical results (Artículo de revista) Chemical Engineering Research and Design, 88 (7), pp. 789-802, 2010, (cited By 8). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Marcovecchio2010789, title = {Improvements of a hollow fiber reverse osmosis desalination model: Analysis of numerical results}, author = { M.G. Marcovecchio and N.J. Scenna and P.A. Aguirre}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77953536795&partnerID=40&md5=e5f3d43587c24d320c362c0add826d95}, doi = {10.1016/j.cherd.2009.12.003}, year = {2010}, date = {2010-01-01}, journal = {Chemical Engineering Research and Design}, volume = {88}, number = {7}, pages = {789-802}, abstract = {In this work, a rigorous model describing the processes taking place in hollow fiber modules for reverse osmosis desalination is analyzed. The Kimura-Sourirajan model is used for describing transport phenomena through the membrane. The concentration polarization phenomenon is mathematically described using the film theory, while the Hagen-Poiseuille and Ergun equations describe the pressure drop in the fiber bore and on the shell side of the fiber bundle, respectively. Improving the previous model, in this work the salt concentration of the permeate accumulated along the fiber is calculated from appropriate mass balances. Hence, the osmotic pressure and the water and salt fluxes through the membrane that depend on this concentration change through the module; and it also influences indirectly the calculation of other process parameters. The solutions of all the differential equations involved in the model are accurately approximated by the finite differences method applied over an appropriate discretization. The value of the output variables changes less than 1% when the finite difference mesh is increased from 6 to 7 grid points in the range of each domain, axial and radial. 
The flow rates and salt concentrations profiles obtained by the proposed model are analyzed. The influences of the transmembrane and osmotic pressures over the permeate flow rates and salt concentrations are studied. The effect of incorporating the accumulated permeate salinity is showed. It is proved that errors committed by ignoring the permeate accumulated salinity can be significant. Sensitivity analysis for the permeate flow rate and permeate salt concentration is performed by studying the influence of different kind of data: input variables, physical coefficients and design variables. © 2010.}, note = {cited By 8}, keywords = {}, pubstate = {published}, tppubtype = {article} } In this work, a rigorous model describing the processes taking place in hollow fiber modules for reverse osmosis desalination is analyzed. The Kimura-Sourirajan model is used for describing transport phenomena through the membrane. The concentration polarization phenomenon is mathematically described using the film theory, while the Hagen-Poiseuille and Ergun equations describe the pressure drop in the fiber bore and on the shell side of the fiber bundle, respectively. Improving the previous model, in this work the salt concentration of the permeate accumulated along the fiber is calculated from appropriate mass balances. Hence, the osmotic pressure and the water and salt fluxes through the membrane that depend on this concentration change through the module; and it also influences indirectly the calculation of other process parameters. The solutions of all the differential equations involved in the model are accurately approximated by the finite differences method applied over an appropriate discretization. The value of the output variables changes less than 1% when the finite difference mesh is increased from 6 to 7 grid points in the range of each domain, axial and radial. The flow rates and salt concentrations profiles obtained by the proposed model are analyzed. 
The influences of the transmembrane and osmotic pressures over the permeate flow rates and salt concentrations are studied. The effect of incorporating the accumulated permeate salinity is shown. It is proved that errors committed by ignoring the permeate accumulated salinity can be significant. Sensitivity analysis for the permeate flow rate and permeate salt concentration is performed by studying the influence of different kinds of data: input variables, physical coefficients and design variables. © 2010. |
J.A. Francesconi, M.C. Mussati, P.A. Aguirre Effects of PEMFC operating parameters on the performance of an integrated ethanol processor (Artículo de revista) International Journal of Hydrogen Energy, 35 (11), pp. 5940-5946, 2010, (cited By 5). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Francesconi20105940, title = {Effects of PEMFC operating parameters on the performance of an integrated ethanol processor}, author = { J.A. Francesconi and M.C. Mussati and P.A. Aguirre}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77955285302&partnerID=40&md5=5236e2465cad1eeb7ab0687740e9e002}, doi = {10.1016/j.ijhydene.2009.12.103}, year = {2010}, date = {2010-01-01}, journal = {International Journal of Hydrogen Energy}, volume = {35}, number = {11}, pages = {5940-5946}, abstract = {In this paper the performance of a complete fuel cell system processing ethanol fuel has been analyzed as a function of the main fuel cell operating parameters. The fuel processor is based on the steam reforming process, followed by high- and low-temperature shift reactors, and carbon monoxide preferential oxidation reactor, which are coupled to a polymeric fuel cell (PEMFC). The goal was to analyze and improve the fuel cell system performance by simulation techniques. PEMFC operation has been analyzed using an available parametric model, which was implemented within HYSYS environment software. Pinch Analysis concepts were used to investigate the process energy integration and determine the maximum efficiency minimizing ethanol consumption. The system performance was analyzed for the SR-12 Modular PEM Generator, the Ballard Mark V fuel cell and the BCS 500 W stack. The net system efficiency is dependent on the required power demand. Efficiency values higher than 50% at low loads and less than 30% at high power demands are computed. In addition, the effect of fuel cell temperature, pressure and hydrogen utilization was analyzed. 
The trade-off between the reformer yield and the fuel cell performance defines the optimal operation pressure. The cell temperature determines operating zones where the water, involved in the reforming reactions, can be produced or demanded. © 2009 Professor T. Nejat Veziroglu. Published by Elsevier Ltd. All rights reserved.}, note = {cited By 5}, keywords = {}, pubstate = {published}, tppubtype = {article} } In this paper the performance of a complete fuel cell system processing ethanol fuel has been analyzed as a function of the main fuel cell operating parameters. The fuel processor is based on the steam reforming process, followed by high- and low-temperature shift reactors, and carbon monoxide preferential oxidation reactor, which are coupled to a polymeric fuel cell (PEMFC). The goal was to analyze and improve the fuel cell system performance by simulation techniques. PEMFC operation has been analyzed using an available parametric model, which was implemented within HYSYS environment software. Pinch Analysis concepts were used to investigate the process energy integration and determine the maximum efficiency minimizing ethanol consumption. The system performance was analyzed for the SR-12 Modular PEM Generator, the Ballard Mark V fuel cell and the BCS 500 W stack. The net system efficiency is dependent on the required power demand. Efficiency values higher than 50% at low loads and less than 30% at high power demands are computed. In addition, the effect of fuel cell temperature, pressure and hydrogen utilization was analyzed. The trade-off between the reformer yield and the fuel cell performance defines the optimal operation pressure. The cell temperature determines operating zones where the water, involved in the reforming reactions, can be produced or demanded. © 2009 Professor T. Nejat Veziroglu. Published by Elsevier Ltd. All rights reserved. |
D.G. Oliva, J.A. Francesconi, M.C. Mussati, P.A. Aguirre Energy efficiency analysis of an integrated glycerin processor for PEM fuel cells: Comparison with an ethanol-based system (Artículo de revista) International Journal of Hydrogen Energy, 35 (2), pp. 709-724, 2010, (cited By 9). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Oliva2010709, title = {Energy efficiency analysis of an integrated glycerin processor for PEM fuel cells: Comparison with an ethanol-based system}, author = { D.G. Oliva and J.A. Francesconi and M.C. Mussati and P.A. Aguirre}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-73749083220&partnerID=40&md5=7311d6d5b54634a4c53f7b1899636c3a}, doi = {10.1016/j.ijhydene.2009.10.082}, year = {2010}, date = {2010-01-01}, journal = {International Journal of Hydrogen Energy}, volume = {35}, number = {2}, pages = {709-724}, abstract = {The aim of this work is to analyze energetically the use of glycerin as the primary hydrogen source to operate a proton exchange membrane fuel cell. A glycerin processor system based on its steam reforming is described departing from a previous process model developed for ethanol processing. Since about 10% w/w of glycerin is produced as a byproduct when vegetable oils are converted into biodiesel, and due to the later is increasing its production abruptly, a large glycerin excess is expected to oversaturate the market. The reformed stream contains mainly H2 but also CO, CO2, H2O and CH4. As CO is a poison for PEM fuel cell type, a stream purification step is previously required. The purification subsystem consists of two water gas shift reactors and a CO preferential oxidation reactor to reduce the CO levels below 10 ppm. The reforming process is governed by endothermic reactions, requiring thus energy to proceed. Depending on the system operation point, the energy requirements can be fulfilled by burning an extra glycerin amount (to be determined), which is the minimal that meets the energy requirements. 
In addition a self-sufficient operation region can be distinguished. In this context, the water/glycerin molar ratio, the glycerin steam reformer temperature, the system pressure, and the extra glycerin amount to be burned (if necessary) are the main decision variables subject to analysis. Process variables are calculated simultaneously, updating the composite curves at each iteration to obtain the best possible energy integration of the process. The highest net system efficiency value computed is 38.56% based on the lower heating value, and 34.71% based on the higher heating value. These efficiency values correspond to a pressure of 2 atm, a water/glycerin molar ratio of 5, a glycerin steam reformer temperature of 953 K, and an extra glycerin amount burned of 0.27 mol h-1. Based on the main process variables, suitable system operation zones are identified. As in practice, most PEM fuel cells operate at 3 atm, optimal variable values obtained at this condition are also reported. Finally, some results and aspects on the system performance of both glycerin and ethanol processors operated at 3 atm are compared and discussed. © 2009 Professor T. Nejat Veziroglu.}, note = {cited By 9}, keywords = {}, pubstate = {published}, tppubtype = {article} } The aim of this work is to analyze energetically the use of glycerin as the primary hydrogen source to operate a proton exchange membrane fuel cell. A glycerin processor system based on its steam reforming is described departing from a previous process model developed for ethanol processing. Since about 10% w/w of glycerin is produced as a byproduct when vegetable oils are converted into biodiesel, and due to the later is increasing its production abruptly, a large glycerin excess is expected to oversaturate the market. The reformed stream contains mainly H2 but also CO, CO2, H2O and CH4. As CO is a poison for PEM fuel cell type, a stream purification step is previously required. 
The purification subsystem consists of two water gas shift reactors and a CO preferential oxidation reactor to reduce the CO levels below 10 ppm. The reforming process is governed by endothermic reactions, requiring thus energy to proceed. Depending on the system operation point, the energy requirements can be fulfilled by burning an extra glycerin amount (to be determined), which is the minimal that meets the energy requirements. In addition a self-sufficient operation region can be distinguished. In this context, the water/glycerin molar ratio, the glycerin steam reformer temperature, the system pressure, and the extra glycerin amount to be burned (if necessary) are the main decision variables subject to analysis. Process variables are calculated simultaneously, updating the composite curves at each iteration to obtain the best possible energy integration of the process. The highest net system efficiency value computed is 38.56% based on the lower heating value, and 34.71% based on the higher heating value. These efficiency values correspond to a pressure of 2 atm, a water/glycerin molar ratio of 5, a glycerin steam reformer temperature of 953 K, and an extra glycerin amount burned of 0.27 mol h-1. Based on the main process variables, suitable system operation zones are identified. As in practice, most PEM fuel cells operate at 3 atm, optimal variable values obtained at this condition are also reported. Finally, some results and aspects on the system performance of both glycerin and ethanol processors operated at 3 atm are compared and discussed. © 2009 Professor T. Nejat Veziroglu. |
M.A. Reinheimer, S.F. Mussati, N.J. Scenna Influence of product composition and operating conditions on the unsteady behavior of hard candy cooling process (Artículo de revista) Journal of Food Engineering, 101 (4), pp. 409-416, 2010, (cited By 5). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Reinheimer2010409, title = {Influence of product composition and operating conditions on the unsteady behavior of hard candy cooling process}, author = { M.A. Reinheimer and S.F. Mussati and N.J. Scenna}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77956187100&partnerID=40&md5=0fb9a0a000470326873e92c25b9c67c7}, doi = {10.1016/j.jfoodeng.2010.07.029}, year = {2010}, date = {2010-01-01}, journal = {Journal of Food Engineering}, volume = {101}, number = {4}, pages = {409-416}, abstract = {On large industrial scales, the cooling stage in the production process of hard candies is one of the most critical unit operations. The main problems affecting final hard candies quality as regards the cooling process are: deformation, fragility and aggregation. The main operating conditions of the cooling stage are temperature and velocity of cooling air as well as the residence time of candy inside the tunnel. The objective of this work is to study the influence of process operating conditions and candy composition on the unsteady behavior of the cooling process of hard candies to improve final product quality. The study is conducted by using a simple mathematical model which was implemented and solved by using gPROMS (general Process Modeling System). A detailed discussion of results is presented through several examples. © 2010 Elsevier Ltd. All rights reserved.}, note = {cited By 5}, keywords = {}, pubstate = {published}, tppubtype = {article} } On large industrial scales, the cooling stage in the production process of hard candies is one of the most critical unit operations. 
The main problems affecting final hard candies quality as regards the cooling process are: deformation, fragility and aggregation. The main operating conditions of the cooling stage are temperature and velocity of cooling air as well as the residence time of candy inside the tunnel. The objective of this work is to study the influence of process operating conditions and candy composition on the unsteady behavior of the cooling process of hard candies to improve final product quality. The study is conducted by using a simple mathematical model which was implemented and solved by using gPROMS (general Process Modeling System). A detailed discussion of results is presented through several examples. © 2010 Elsevier Ltd. All rights reserved. |
M.A. Reinheimer, S.F. Mussati, N.J. Scenna, G.A. Pérez Influence of the microstructure and composition on the thermal-physical properties of hard candy and cooling process (Artículo de revista) Journal of Molecular Structure, 980 (1-3), pp. 250-256, 2010, (cited By 2). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Reinheimer2010250, title = {Influence of the microstructure and composition on the thermal-physical properties of hard candy and cooling process}, author = { M.A. Reinheimer and S.F. Mussati and N.J. Scenna and G.A. Pérez}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77956058876&partnerID=40&md5=171fd6105e1c32849d8e469e015dd03f}, doi = {10.1016/j.molstruc.2010.07.027}, year = {2010}, date = {2010-01-01}, journal = {Journal of Molecular Structure}, volume = {980}, number = {1-3}, pages = {250-256}, abstract = {In this paper, glass transition temperature (Tg) and microstructure of hard candy honey flavored have been investigated using differential scanning calorimetry (DSC) data and scanning electron microscopy images (SEM) respectively. Precisely, the glass transition temperature can be used as reference temperature to determine the operating mode of processing stages. In fact, the temperature at which hard candies may leave the cooling stage has to be equal or lower than 34 °C in order to ensure the glassy state and therefore improve product shelf life; due to the fact that the experimental results indicated a temperature range of glass transition of 35.36 ± 1.48-36.37 ± 1.63 °C. As regards to the microstructure, SEM images reveal overlapping of layers at samples edges which could be attributed to the water absorption from the environment leading to storage problems, like crystallization. In addition, micrographics also reveal the presence of air bubbles which may negatively affect the temperature profile inside the candy and consequently may change the operating mode of the cooling equipment. 
The influence of the air bubbles on the thermal conductivity of the candy is also investigated. © 2010 Elsevier B.V. All rights reserved.}, note = {cited By 2}, keywords = {}, pubstate = {published}, tppubtype = {article} } In this paper, glass transition temperature (Tg) and microstructure of hard candy honey flavored have been investigated using differential scanning calorimetry (DSC) data and scanning electron microscopy images (SEM) respectively. Precisely, the glass transition temperature can be used as reference temperature to determine the operating mode of processing stages. In fact, the temperature at which hard candies may leave the cooling stage has to be equal or lower than 34 °C in order to ensure the glassy state and therefore improve product shelf life; due to the fact that the experimental results indicated a temperature range of glass transition of 35.36 ± 1.48-36.37 ± 1.63 °C. As regards to the microstructure, SEM images reveal overlapping of layers at samples edges which could be attributed to the water absorption from the environment leading to storage problems, like crystallization. In addition, micrographics also reveal the presence of air bubbles which may negatively affect the temperature profile inside the candy and consequently may change the operating mode of the cooling equipment. The influence of the air bubbles on the thermal conductivity of the candy is also investigated. © 2010 Elsevier B.V. All rights reserved. |
E.E. Tarifa, F.N. Álvaro, S. Franco, S.F. Mussati Fault diagnosis for an MSF desalination plant by using Bayesian networks (Artículo de revista) Desalination and Water Treatment, 21 (1-3), pp. 102-108, 2010, (cited By 1). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Tarifa2010102, title = {Fault diagnosis for an MSF desalination plant by using Bayesian networks}, author = { E.E. Tarifa and F.N. Álvaro and S. Franco and S.F. Mussati}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78449279751&partnerID=40&md5=44b73dd1f420a82d09f7277bcf34c077}, doi = {10.5004/dwt.2010.1265}, year = {2010}, date = {2010-01-01}, journal = {Desalination and Water Treatment}, volume = {21}, number = {1-3}, pages = {102-108}, abstract = {This work outlines the development of a fault diagnostic system for an MSF (multi-stage flash) desalination plant by using BNs (Bayesian networks). This diagnostic system processes the plant data to determine whether the process state is normal or not. In the latter case, the diagnostic system determines the cause of the abnormal process state; i.e., it finds out which is the fault that is affecting the supervised process. A BN is a graphical model that encodes probabilistic relationships among variables of interest. When used in conjunction with statistical techniques, the graphical model has several advantages for data analysis. A BN readily handles situations where some data entries are missing. This paper determines both the structure and parameters of a BN intended for a diagnostic system. The implemented system is evaluated by using a dynamic simulator, which was developed for a real MSF desalination plant. Besides, the diagnostic system performance is compared with the performances of two other diagnostic systems. The obtained results show some advantages for the BN based diagnostic system. 
© 2010 Desalination Publications.}, note = {cited By 1}, keywords = {}, pubstate = {published}, tppubtype = {article} } This work outlines the development of a fault diagnostic system for an MSF (multi-stage flash) desalination plant by using BNs (Bayesian networks). This diagnostic system processes the plant data to determine whether the process state is normal or not. In the latter case, the diagnostic system determines the cause of the abnormal process state; i.e., it finds out which is the fault that is affecting the supervised process. A BN is a graphical model that encodes probabilistic relationships among variables of interest. When used in conjunction with statistical techniques, the graphical model has several advantages for data analysis. A BN readily handles situations where some data entries are missing. This paper determines both the structure and parameters of a BN intended for a diagnostic system. The implemented system is evaluated by using a dynamic simulator, which was developed for a real MSF desalination plant. Besides, the diagnostic system performance is compared with the performances of two other diagnostic systems. The obtained results show some advantages for the BN based diagnostic system. © 2010 Desalination Publications. |
L.C. Ballejos, J.M. Montagna Identifying interorganisational networks: A factor-based approach (Artículo de revista) International Journal of Networking and Virtual Organisations, 7 (1), pp. 1-22, 2010, (cited By 6). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Ballejos20101, title = {Identifying interorganisational networks: A factor-based approach}, author = { L.C. Ballejos and J.M. Montagna}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-72149086127&partnerID=40&md5=b173f453b061f160f4c5703078cdca87}, doi = {10.1504/IJNVO.2010.029867}, year = {2010}, date = {2010-01-01}, journal = {International Journal of Networking and Virtual Organisations}, volume = {7}, number = {1}, pages = {1-22}, abstract = {This paper deeply analyses Interorganisational Networks (IONs), reaching the understanding of their creation and operation in different contexts. The rapid growth of IONs has not yet resulted in any systematic characterisation for them. Indeed, the literature has primarily discussed specific types, but there is a need to determine a set of factors that best describes them and, at the same time, helps in their individualisation and distinction from others. Thus, a framework to identify the IONs is developed. It is formed by diverse factors divided in three perspectives: organisational, interorganisational and technological. This will allow users and developers to understand this complex context, specify their needs and establish a common basis for IONs' application and understanding, including the aspects of their design and analysis when implementing Interorganisational Information Systems (IOSs), etc. As an example, the decisions at different levels during Supply Chain Networks' (SCNs) operation are analysed. 
© 2010 Inderscience Enterprises Ltd.}, note = {cited By 6}, keywords = {}, pubstate = {published}, tppubtype = {article} } This paper deeply analyses Interorganisational Networks (IONs), reaching the understanding of their creation and operation in different contexts. The rapid growth of IONs has not yet resulted in any systematic characterisation for them. Indeed, the literature has primarily discussed specific types, but there is a need to determine a set of factors that best describes them and, at the same time, helps in their individualisation and distinction from others. Thus, a framework to identify the IONs is developed. It is formed by diverse factors divided in three perspectives: organisational, interorganisational and technological. This will allow users and developers to understand this complex context, specify their needs and establish a common basis for IONs' application and understanding, including the aspects of their design and analysis when implementing Interorganisational Information Systems (IOSs), etc. As an example, the decisions at different levels during Supply Chain Networks' (SCNs) operation are analysed. © 2010 Inderscience Enterprises Ltd. |
A.H. González, E.J. Adam, M.G. Marcovecchio, D. Odloak Application of IHMPC to an unstable reactor system: Study of feasibility and performance (Conferencia) 9 (PART 1), 2010, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @conference{González2010284, title = {Application of IHMPC to an unstable reactor system: Study of feasibility and performance}, author = { A.H. González and E.J. Adam and M.G. Marcovecchio and D. Odloak}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-80051750551&partnerID=40&md5=da1d9186fc39bfa7b2731c5ed9619faf}, doi = {10.3182/20100705-3-BE-2011.0081}, year = {2010}, date = {2010-01-01}, journal = {IFAC Proceedings Volumes (IFAC-PapersOnline)}, volume = {9}, number = {PART 1}, pages = {284-289}, abstract = {Almost all the theoretical aspects of Model Predictive Control (MPC), such as stability, recursive feasibility and even the optimality are now well established for both, the nominal and the robust case. The stability and recursive feasibility are usually guaranteed by means of additional terminal constraints, while the optimality is achieved considering closed-loop predictions. However, these significant improvements are not always applicable to real processes. An interesting case is the control of open-loop unstable reactor systems. There, the traditional infinite horizon MPC (IHMPC), which constitutes the simplest strategy ensuring stability, needs to include an additional terminal constraint to cancel the unstable modes, producing in this way feasibility problems. The terminal constraint could be an equality or an inclusion constraint, depending on the local controller assumed for predictions. In both cases, however, a prohibitive length of the control horizon is necessary to produce a reasonable domain of attraction for real applications. In this work, we propose an IHMPC formulation that has maximal domain of attraction (i.e. 
the domain of attraction is determined by the system and the constraints, and not by the controller) and is suitable for real applications in the sense that it accounts for the case of output tracking, it is offset free if the output target is reachable, and minimizes the offset if some of the constraints become active at steady-state. © 2009 IFAC.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Almost all the theoretical aspects of Model Predictive Control (MPC), such as stability, recursive feasibility and even the optimality are now well established for both, the nominal and the robust case. The stability and recursive feasibility are usually guaranteed by means of additional terminal constraints, while the optimality is achieved considering closed-loop predictions. However, these significant improvements are not always applicable to real processes. An interesting case is the control of open-loop unstable reactor systems. There, the traditional infinite horizon MPC (IHMPC), which constitutes the simplest strategy ensuring stability, needs to include an additional terminal constraint to cancel the unstable modes, producing in this way feasibility problems. The terminal constraint could be an equality or an inclusion constraint, depending on the local controller assumed for predictions. In both cases, however, a prohibitive length of the control horizon is necessary to produce a reasonable domain of attraction for real applications. In this work, we propose an IHMPC formulation that has maximal domain of attraction (i.e. the domain of attraction is determined by the system and the constraints, and not by the controller) and is suitable for real applications in the sense that it accounts for the case of output tracking, it is offset free if the output target is reachable, and minimizes the offset if some of the constraints become active at steady-state. © 2009 IFAC. |
2009 |
E.C. Martínez, M.D. Cristaldi, R.J. Grau Design of dynamic experiments in modeling for optimization of batch processes (Artículo de revista) Industrial and Engineering Chemistry Research, 48 (7), pp. 3453-3465, 2009, (cited By 10). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Martínez20093453, title = {Design of dynamic experiments in modeling for optimization of batch processes}, author = { E.C. Martínez and M.D. Cristaldi and R.J. Grau}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-65249146640&partnerID=40&md5=ab67d428ed4c8239c82649743d52d15b}, doi = {10.1021/ie8000953}, year = {2009}, date = {2009-01-01}, journal = {Industrial and Engineering Chemistry Research}, volume = {48}, number = {7}, pages = {3453-3465}, abstract = {Finding optimal operating conditions fast with a scarce budget of experimental runs is a key problem to speeding up the development of innovative products and processes. Modeling for optimization is proposed as a systematic approach to bias data gathering for iterative policy improvement through experimental design using first-principles models. Designing dynamic experiments that are optimally informative in order to reduce the uncertainty about the optimal operating conditions is addressed by integrating policy iteration based on the Hamilton-Jacobi-Bellman optimality equation with global sensitivity analysis. A conceptual framework for run-to-run convergence of a model-based policy iteration algorithm is proposed. Results obtained in the fed-batch fermentation of penicillin G are presented. The well-known Bajpai and Reuss bioreactor model validated with industrial data is used to increase on a run-to-run basis the amount of penicillin obtained by input policy optimization and selective (re)estimation of relevant model parameters. A remarkable improvement in productivity can be gain using a simple policy structure after only two modeling runs despite initial modeling uncertainty. 
© 2009 American Chemical Society.}, note = {cited By 10}, keywords = {}, pubstate = {published}, tppubtype = {article} } Finding optimal operating conditions fast with a scarce budget of experimental runs is a key problem to speeding up the development of innovative products and processes. Modeling for optimization is proposed as a systematic approach to bias data gathering for iterative policy improvement through experimental design using first-principles models. Designing dynamic experiments that are optimally informative in order to reduce the uncertainty about the optimal operating conditions is addressed by integrating policy iteration based on the Hamilton-Jacobi-Bellman optimality equation with global sensitivity analysis. A conceptual framework for run-to-run convergence of a model-based policy iteration algorithm is proposed. Results obtained in the fed-batch fermentation of penicillin G are presented. The well-known Bajpai and Reuss bioreactor model validated with industrial data is used to increase on a run-to-run basis the amount of penicillin obtained by input policy optimization and selective (re)estimation of relevant model parameters. A remarkable improvement in productivity can be gain using a simple policy structure after only two modeling runs despite initial modeling uncertainty. © 2009 American Chemical Society. |
A. Assandri, C. De Prada, S. Cristea, E.C. Martínez Nonlinear parametric predictive control. Application to a continuous stirred tank reactor (Artículo de revista) Asia-Pacific Journal of Chemical Engineering, 4 (6), pp. 858-869, 2009, (cited By 1). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Assandri2009858, title = {Nonlinear parametric predictive control. Application to a continuous stirred tank reactor}, author = { A. Assandri and C. De Prada and S. Cristea and E.C. Martínez}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-73449124801&partnerID=40&md5=75fdb4a68e3409981a76be80c966441f}, doi = {10.1002/apj.331}, year = {2009}, date = {2009-01-01}, journal = {Asia-Pacific Journal of Chemical Engineering}, volume = {4}, number = {6}, pages = {858-869}, abstract = {This paper presents a nonlinear model-based controller based on the ideas of parametric predictive control applied to a continuous stirred tank reactor (CSTR) process unit. Controller design aims at avoiding the complexity of implementation and long computational times associated with conventional NMPC while maintaining the main advantage of taking into account process nonlinearities that are relevant for control. The design of the parametric predictive controller is based on a rather simplified process model having parameters that are instrumental in determining the required changes to the manipulated variables for error reduction. The nonlinear controller is easy to tune and can operate successfully over a wide range of operating conditions. The use of an estimator of unmeasured disturbances and process-model mismatch further enhances the behavior of the controller. 
© 2009 Curtin University of Technology and John Wiley & Sons, Ltd.}, note = {cited By 1}, keywords = {}, pubstate = {published}, tppubtype = {article} } This paper presents a nonlinear model-based controller based on the ideas of parametric predictive control applied to a continuous stirred tank reactor (CSTR) process unit. Controller design aims at avoiding the complexity of implementation and long computational times associated with conventional NMPC while maintaining the main advantage of taking into account process nonlinearities that are relevant for control. The design of the parametric predictive controller is based on a rather simplified process model having parameters that are instrumental in determining the required changes to the manipulated variables for error reduction. The nonlinear controller is easy to tune and can operate successfully over a wide range of operating conditions. The use of an estimator of unmeasured disturbances and process-model mismatch further enhances the behavior of the controller. © 2009 Curtin University of Technology and John Wiley & Sons, Ltd. |
M. Rolón, M. Canavesio, E.C. Martínez Agent Based Modelling and Simulation of Intelligent Distributed Scheduling Systems (Artículo de revista) Computer Aided Chemical Engineering, 26 , pp. 985-990, 2009, (cited By 2). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Rolón2009985, title = {Agent Based Modelling and Simulation of Intelligent Distributed Scheduling Systems}, author = { M. Rolón and M. Canavesio and E.C. Martínez}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-67649120015&partnerID=40&md5=58cc3f03ddf7bc405c62cdbb4f80bac0}, doi = {10.1016/S1570-7946(09)70164-6}, year = {2009}, date = {2009-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {26}, pages = {985-990}, abstract = {For responsiveness and agility disruptive events must be managed locally to avoid propagating the effects along the value chain. In this work, emergent distributed scheduling is proposed to overcome the traditional separation between task scheduling and manufacturing execution systems. An interaction mechanism designed around the concept of order and resource agents acting as autonomic managers is described. Results obtained for different scenarios using a simulation model of the mechanism implemented in Netlogo are presented. © 2009 Elsevier B.V. All rights reserved.}, note = {cited By 2}, keywords = {}, pubstate = {published}, tppubtype = {article} } For responsiveness and agility disruptive events must be managed locally to avoid propagating the effects along the value chain. In this work, emergent distributed scheduling is proposed to overcome the traditional separation between task scheduling and manufacturing execution systems. An interaction mechanism designed around the concept of order and resource agents acting as autonomic managers is described. Results obtained for different scenarios using a simulation model of the mechanism implemented in Netlogo are presented. © 2009 Elsevier B.V. All rights reserved. |
M. De Paula, E.C. Martínez Data-driven generation of multi-modal control programs for continuous-discrete processes (Artículo de revista) Computer Aided Chemical Engineering, 26 , pp. 261-266, 2009, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{DePaula2009261, title = {Data-driven generation of multi-modal control programs for continuous-discrete processes}, author = { M. De Paula and E.C. Martínez}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-67649124458&partnerID=40&md5=b357a27265418ea4bc1b8682a5fe1656}, doi = {10.1016/S1570-7946(09)70044-6}, year = {2009}, date = {2009-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {26}, pages = {261-266}, abstract = {Multi-modal control is an increasingly used design tool for supervisory control of complex systems by resorting to sequences of modes each one comprising of a feedback law and stopping condition. In this paper, the problem of developing multi-modal control programs from a given mode alphabet using data is addressed. By viewing the control space as a set of tokenized instructions rather than as real-valued signals, reinforcement learning algorithms becomes applicable to develop optimal control strategies for continuous-discrete processes using a Lebesgue-sampled finite state machine. A case study related to capacity management of a buffer tank in a petrochemical plant is presented. © 2009 Elsevier B.V. All rights reserved.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {article} } Multi-modal control is an increasingly used design tool for supervisory control of complex systems by resorting to sequences of modes each one comprising of a feedback law and stopping condition. In this paper, the problem of developing multi-modal control programs from a given mode alphabet using data is addressed. 
By viewing the control space as a set of tokenized instructions rather than as real-valued signals, reinforcement learning algorithms become applicable to develop optimal control strategies for continuous-discrete processes using a Lebesgue-sampled finite state machine. A case study related to capacity management of a buffer tank in a petrochemical plant is presented. © 2009 Elsevier B.V. All rights reserved. |
M. Cristaldi, R. Grau, E.C. Martínez Iterative design of dynamic experiments in modeling for optimization of innovative bioprocesses (Artículo de revista) Chemical Product and Process Modeling, 4 (2), 2009, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Cristaldi2009, title = {Iterative design of dynamic experiments in modeling for optimization of innovative bioprocesses}, author = { M. Cristaldi and R. Grau and E.C. Martínez}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-67650486459&partnerID=40&md5=d9b0b03c087d368362933c56e45eaa81}, year = {2009}, date = {2009-01-01}, journal = {Chemical Product and Process Modeling}, volume = {4}, number = {2}, abstract = {Finding optimal operating conditions fast with a scarce budget of experimental runs is a key problem to speed up the development and scaling up of innovative bioprocesses. In this paper, a novel iterative methodology for the model-based design of dynamic experiments in modeling for optimization is developed and successfully applied to the optimization of a fed-batch bioreactor related to the production of r-interleukin-11 (rIL-11) whose DNA sequence has been cloned in an Escherichia coli strain. At each iteration, the proposed methodology resorts to a library of tendency models to increasingly bias bioreactor operating conditions towards an optimum. By selecting the 'most informative' tendency model in the sequel, the next dynamic experiment is defined by re-optimizing the input policy and calculating optimal sampling times. Model selection is based on minimizing an error measure which distinguishes between parametric and structural uncertainty to selectively bias data gathering towards improved operating conditions. The parametric uncertainty of tendency models is iteratively reduced using Global Sensitivity Analysis (GSA) to pinpoint which parameters are keys for estimating the objective function. Results obtained after just a few iterations are very promising. 
© 2009 The Berkeley Electronic Press.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {article} } Finding optimal operating conditions fast with a scarce budget of experimental runs is a key problem to speed up the development and scaling up of innovative bioprocesses. In this paper, a novel iterative methodology for the model-based design of dynamic experiments in modeling for optimization is developed and successfully applied to the optimization of a fed-batch bioreactor related to the production of r-interleukin-11 (rIL-11) whose DNA sequence has been cloned in an Escherichia coli strain. At each iteration, the proposed methodology resorts to a library of tendency models to increasingly bias bioreactor operating conditions towards an optimum. By selecting the 'most informative' tendency model in the sequel, the next dynamic experiment is defined by re-optimizing the input policy and calculating optimal sampling times. Model selection is based on minimizing an error measure which distinguishes between parametric and structural uncertainty to selectively bias data gathering towards improved operating conditions. The parametric uncertainty of tendency models is iteratively reduced using Global Sensitivity Analysis (GSA) to pinpoint which parameters are keys for estimating the objective function. Results obtained after just a few iterations are very promising. © 2009 The Berkeley Electronic Press. |
R.A. Ghraizi, E.C. Martínez, C. De Prada Control loop performance monitoring using the permutation entropy of error residuals (Conferencia) 7 (PART 1), 2009, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @conference{Ghraizi2009494, title = {Control loop performance monitoring using the permutation entropy of error residuals}, author = { R.A. Ghraizi and E.C. Martínez and C. De Prada}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79960942809&partnerID=40&md5=fc8aa6564132584b161f3e5756676315}, year = {2009}, date = {2009-01-01}, journal = {IFAC Proceedings Volumes (IFAC-PapersOnline)}, volume = {7}, number = {PART 1}, pages = {494-499}, abstract = {The predictability of a control-loop behavior beyond its control horizon is an unambiguous indication of loop malfunctioning. Based on the dynamic complexity of the error residual time series the permutation entropy is proposed to define a sensitive index for performance monitoring using data from close-loop operation. A generic framework to understand and quantify the distinctive increase in predictability of the controller error resulting from ill-tuning, sensor errors and actuator faults using a entropy-like index is presented. The dynamic complexity of a well-performing control loop should correspond to the maximum entropy. As loop performance degrades the entropy of its residual time series decreases and any loss of dynamic complexity in the control system gives rise to an increase of the predictability of the control error time series. 
Results obtained using the proposed performance index along with its confidence interval for industrial data sets are presented to discuss the influence of the sample size, control horizon, and variance estimation in the assessment of closed-loop performance.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The predictability of a control-loop behavior beyond its control horizon is an unambiguous indication of loop malfunctioning. Based on the dynamic complexity of the error residual time series the permutation entropy is proposed to define a sensitive index for performance monitoring using data from closed-loop operation. A generic framework to understand and quantify the distinctive increase in predictability of the controller error resulting from ill-tuning, sensor errors and actuator faults using an entropy-like index is presented. The dynamic complexity of a well-performing control loop should correspond to the maximum entropy. As loop performance degrades the entropy of its residual time series decreases and any loss of dynamic complexity in the control system gives rise to an increase of the predictability of the control error time series. Results obtained using the proposed performance index along with its confidence interval for industrial data sets are presented to discuss the influence of the sample size, control horizon, and variance estimation in the assessment of closed-loop performance. |
M. De Paula, E.C. Martínez Development of multi-modal control programs for continuous-discrete process supervision (Artículo de revista) Computer Aided Chemical Engineering, 27 (C), pp. 1383-1388, 2009, (cited By 1). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{DePaula20091383, title = {Development of multi-modal control programs for continuous-discrete process supervision}, author = { M. De Paula and E.C. Martínez}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77649333637&partnerID=40&md5=df0ee0d83cfcad6334afbc4b05a337a8}, doi = {10.1016/S1570-7946(09)70621-2}, year = {2009}, date = {2009-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {27}, number = {C}, pages = {1383-1388}, abstract = {Continuos-discrete processes are characterized by the strong coupling of continuos state dynamics of recipe-driven discontinuos operations and the discrete state dynamics of logic-based controllers acting at the interface with continuos processes. Multi-modal control is a promising design tool for supervisory control of these type of systems by resorting to sequences of control modes each one comprising of a purposeful feedback law and a stopping condition. In this paper the problem of developing multi-modal control programs from a given mode alphabet using data from alternative disturbance scenarios is addressed. A case study related to maximizing the average productivity of a hybrid chemical plant through multi-modal control of a buffer tank is presented. © 2009 Elsevier B.V. All rights reserved.}, note = {cited By 1}, keywords = {}, pubstate = {published}, tppubtype = {article} } Continuos-discrete processes are characterized by the strong coupling of continuos state dynamics of recipe-driven discontinuos operations and the discrete state dynamics of logic-based controllers acting at the interface with continuos processes. 
Multi-modal control is a promising design tool for supervisory control of these types of systems by resorting to sequences of control modes each one comprising a purposeful feedback law and a stopping condition. In this paper the problem of developing multi-modal control programs from a given mode alphabet using data from alternative disturbance scenarios is addressed. A case study related to maximizing the average productivity of a hybrid chemical plant through multi-modal control of a buffer tank is presented. © 2009 Elsevier B.V. All rights reserved. |
S. Syafiie, F. Tadeo, E.C. Martínez Q(λ) learning technique for pH control (Conferencia) 2009, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @conference{Syafiie2009712, title = {Q(λ) learning technique for pH control}, author = { S. Syafiie and F. Tadeo and E.C. Martínez}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77949501128&partnerID=40&md5=9e040e7668f0883d71d99ff952f0532e}, doi = {10.1109/IEEM.2009.5373232}, year = {2009}, date = {2009-01-01}, journal = {IEEM 2009 - IEEE International Conference on Industrial Engineering and Engineering Management}, pages = {712-716}, abstract = {A simple learning control approach for controlling a pH neutralization process has been developed in this paper. A one-step-ahead Q-learning, namely Q(λ)-learning, using a lookup table is developed and applied to a pH process: weak base - strong acid process. The objective of the controller is to maintain the pH within the goal state. The application at a laboratory pilot plant shows that the proposed Q(λ)-learning regulates the process well, even in the presence of reference changes. ©2009 IEEE.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {conference} } A simple learning control approach for controlling a pH neutralization process has been developed in this paper. A one-step-ahead Q-learning, namely Q(λ)-learning, using a lookup table is developed and applied to a pH process: weak base - strong acid process. The objective of the controller is to maintain the pH within the goal state. The application at a laboratory pilot plant shows that the proposed Q(λ)-learning regulates the process well, even in the presence of reference changes. ©2009 IEEE. |
M. Cristaldi, R. Grau, E.C. Martínez Sequential design of dynamic experiments in modeling for optimization of biological processes (Artículo de revista) Computer Aided Chemical Engineering, 27 (C), pp. 369-374, 2009, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Cristaldi2009369, title = {Sequential design of dynamic experiments in modeling for optimization of biological processes}, author = { M. Cristaldi and R. Grau and E.C. Martínez}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77649280990&partnerID=40&md5=e833858456bf162d59e74e9463c8f4a1}, doi = {10.1016/S1570-7946(09)70282-2}, year = {2009}, date = {2009-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {27}, number = {C}, pages = {369-374}, abstract = {Finding optimal operating conditions fast with a scarce budget of experimental runs is a key problem to speed up the development and scaling up of innovative bioprocesses. A methodology for model-based design of dynamic experiments in modeling for optimization is proposed and successfully applied to the optimization of a fed-batch bioreactor related to the production of r-interleukin-11 whose DNA has been cloned in an E. coli strain. A library of tendency models is used to increasingly bias bioreactor operating conditions towards an optimum. Parametric uncertainty of tendency models is iteratively reduced using Global Sensitivity Analysis (GSA). At each iteration, the 'most informative' tendency model is used for designining the next dynamic experiment. Model selection is based on minimizing an error measure which separates parametric uncertainty from structural errors to trade-off exploration with exploitation. © 2009 Elsevier B.V. All rights reserved.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {article} } Finding optimal operating conditions fast with a scarce budget of experimental runs is a key problem to speed up the development and scaling up of innovative bioprocesses. 
A methodology for model-based design of dynamic experiments in modeling for optimization is proposed and successfully applied to the optimization of a fed-batch bioreactor related to the production of r-interleukin-11 whose DNA has been cloned in an E. coli strain. A library of tendency models is used to increasingly bias bioreactor operating conditions towards an optimum. Parametric uncertainty of tendency models is iteratively reduced using Global Sensitivity Analysis (GSA). At each iteration, the 'most informative' tendency model is used for designing the next dynamic experiment. Model selection is based on minimizing an error measure which separates parametric uncertainty from structural errors to trade-off exploration with exploitation. © 2009 Elsevier B.V. All rights reserved. |
J. Palombarini, E.C. Martínez Learning to Repair Plans and Schedules Using a Relational (Deictic) Representation (Artículo de revista) Computer Aided Chemical Engineering, 27 (C), pp. 1377-1382, 2009, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Palombarini20091377, title = {Learning to Repair Plans and Schedules Using a Relational (Deictic) Representation}, author = { J. Palombarini and E.C. Martínez}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77649287212&partnerID=40&md5=338acf634c22494260b5cfc041a5dafb}, doi = {10.1016/S1570-7946(09)70620-0}, year = {2009}, date = {2009-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {27}, number = {C}, pages = {1377-1382}, abstract = {Unplanned and abnormal events may have a significant impact in the feasibility of plans and schedules which requires to repair them 'on-the-fly' to guarantee due date compliance of orders-in-progress and negotiating delivery conditions for new orders. In this work, a repair-based rescheduling approach based on the integration of intensive simulations with logical and relational reinforcement learning is proposed. Based on a deictic representation of schedule states a number of repair operators have been designed to guide the search for a goal state. The knowledge generated via simulation is encoded in a relational regression tree for the Q-value function defining the utility of applying a given repair operator at a given schedule state. A prototype implementation is discussed using a representative example of 3 batch extruders processing orders for 4 different products. The learning curve for the problem of inserting a new order vividly illustrates the advantages of logical and relational learning in rescheduling. © 2009 Elsevier B.V. 
All rights reserved.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {article} } Unplanned and abnormal events may have a significant impact in the feasibility of plans and schedules which requires to repair them 'on-the-fly' to guarantee due date compliance of orders-in-progress and negotiating delivery conditions for new orders. In this work, a repair-based rescheduling approach based on the integration of intensive simulations with logical and relational reinforcement learning is proposed. Based on a deictic representation of schedule states a number of repair operators have been designed to guide the search for a goal state. The knowledge generated via simulation is encoded in a relational regression tree for the Q-value function defining the utility of applying a given repair operator at a given schedule state. A prototype implementation is discussed using a representative example of 3 batch extruders processing orders for 4 different products. The learning curve for the problem of inserting a new order vividly illustrates the advantages of logical and relational learning in rescheduling. © 2009 Elsevier B.V. All rights reserved. |
M. Rolón, M. Canavesio, E.C. Martínez Generative modeling of holonic manufacturing execution systems for batch plants (Artículo de revista) Computer Aided Chemical Engineering, 27 (C), pp. 795-800, 2009, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Rolón2009795, title = {Generative modeling of holonic manufacturing execution systems for batch plants}, author = { M. Rolón and M. Canavesio and E.C. Martínez}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77649326093&partnerID=40&md5=f665488bd062bdc57281946d79d77173}, doi = {10.1016/S1570-7946(09)70353-0}, year = {2009}, date = {2009-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {27}, number = {C}, pages = {795-800}, abstract = {Most batch plants are constantly subjected to unplanned disruptive events such as arrivals of rush orders, raw material delays or equipment breakdowns along with a multitude of interactions in the supply chain demanding automatic on-line rescheduling and execution control. For responsiveness and agility at the shop-floor, a holonic batch control architecture based on autonomic units is proposed. A generative modeling of the proposed holonic manufacturing execution system (MES) is presented in order to evaluate its emerging behavior and macroscopic dynamics in a multiproduct batch plant. A simulation model of the proposed holonic MES for a case study was implemented in Netlogo. Different scenarios are considered to assess disturbance rejection capability by the holonic MES. Results obtained are very promising. © 2009 Elsevier B.V. All rights reserved.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {article} } Most batch plants are constantly subjected to unplanned disruptive events such as arrivals of rush orders, raw material delays or equipment breakdowns along with a multitude of interactions in the supply chain demanding automatic on-line rescheduling and execution control. 
For responsiveness and agility at the shop-floor, a holonic batch control architecture based on autonomic units is proposed. A generative modeling of the proposed holonic manufacturing execution system (MES) is presented in order to evaluate its emerging behavior and macroscopic dynamics in a multiproduct batch plant. A simulation model of the proposed holonic MES for a case study was implemented in Netlogo. Different scenarios are considered to assess disturbance rejection capability by the holonic MES. Results obtained are very promising. © 2009 Elsevier B.V. All rights reserved. |
M.G. Cortés, E.G. Suáres, Y.C. Salgado, Y.A. Carvajal, V.G. Morales, G. Corsano Mass and energy integration in the sugar and alcohol production processes (Conferencia) 2009, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @conference{Cortés20091441, title = {Mass and energy integration in the sugar and alcohol production processes}, author = { M.G. Cortés and E.G. Suáres and Y.C. Salgado and Y.A. Carvajal and V.G. Morales and G. Corsano}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84925071195&partnerID=40&md5=f1ad5ac9f045eb1400530c9406f4ff56}, year = {2009}, date = {2009-01-01}, journal = {ECOS 2009 - 22nd International Conference on Efficiency, Cost, Optimization, Simulation and Environmental Impact of Energy Systems}, pages = {1441-1448}, abstract = {Chemical industry is crossed a complex situation at world level currently. The capital shortage and the legislations in environmental matter are more and more sever. It is for that the industry is forced to make efficient their processes, using alternative raw matter, taking the advantages of resources that offer the own processes. On this way chemical industry optimize the use and the protection of the natural resources and to be more competitive in the international market. The sugar cane industry and derivative processes are not exempt of this situation. Most of the cases its productions cause a negative effect toward the environment. Mass and energy residues are disposal to the environment without an appropriate use of the resources and treatment of the waste resulting that in a low technical, economic efficiency and negative environmental impact. The goal of this work was centered in proposing alternative of mass and energy integration in the sugar and ethanol production in order to achieve more efficient productions. 
A strategy is developed for process integration and through mathematical modelling and optimization are identified the opportunities of exchange mass and energy resources, as well as the necessities of technological changes, achieving with it an integrated complex of more technical and environmental efficiency. Three alternatives are analyzed for process integration. The proposed redistribution of flows allows improvements in the quality of sugar and molasses use. Higher alcoholic yields and fermentative efficiencies are obtained in comparison with the traditional production processes of sugar and alcohol. A better redistribution of water is achieved and bigger energy efficiency is obtained in the integrated processes. The consumption of process steam is diminished and bigger bagasse surpluses are obtained. The extraction of juices diluted for the alcoholic fermentation diminishes the consumption of process steam in 22% with the system of three syrup and of 28% with the system of two syrup. The extraction of poor juices of sugar process production favors the clarified juice quality in the sugar factory, what rebounds positively in the efficiency of alcohol production process. Others important results are achieved such as bigger raw matter availability, a minimization of fresh water consumption and waste are obtained with lower indexes of contamination. © 2009 by ABCM.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Chemical industry is crossed a complex situation at world level currently. The capital shortage and the legislations in environmental matter are more and more sever. It is for that the industry is forced to make efficient their processes, using alternative raw matter, taking the advantages of resources that offer the own processes. On this way chemical industry optimize the use and the protection of the natural resources and to be more competitive in the international market. 
The sugar cane industry and derivative processes are not exempt of this situation. Most of the cases its productions cause a negative effect toward the environment. Mass and energy residues are disposal to the environment without an appropriate use of the resources and treatment of the waste resulting that in a low technical, economic efficiency and negative environmental impact. The goal of this work was centered in proposing alternative of mass and energy integration in the sugar and ethanol production in order to achieve more efficient productions. A strategy is developed for process integration and through mathematical modelling and optimization are identified the opportunities of exchange mass and energy resources, as well as the necessities of technological changes, achieving with it an integrated complex of more technical and environmental efficiency. Three alternatives are analyzed for process integration. The proposed redistribution of flows allows improvements in the quality of sugar and molasses use. Higher alcoholic yields and fermentative efficiencies are obtained in comparison with the traditional production processes of sugar and alcohol. A better redistribution of water is achieved and bigger energy efficiency is obtained in the integrated processes. The consumption of process steam is diminished and bigger bagasse surpluses are obtained. The extraction of juices diluted for the alcoholic fermentation diminishes the consumption of process steam in 22% with the system of three syrup and of 28% with the system of two syrup. The extraction of poor juices of sugar process production favors the clarified juice quality in the sugar factory, what rebounds positively in the efficiency of alcohol production process. Others important results are achieved such as bigger raw matter availability, a minimization of fresh water consumption and waste are obtained with lower indexes of contamination. © 2009 by ABCM. |
E.R. Henquín, J.M. Bisang Comparison between primary and secondary current distributions in bipolar electrochemical reactors (Artículo de revista) Journal of Applied Electrochemistry, 39 (10), pp. 1755-1762, 2009, (cited By 3). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Henquín20091755, title = {Comparison between primary and secondary current distributions in bipolar electrochemical reactors}, author = { E.R. Henquín and J.M. Bisang}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-73149085269&partnerID=40&md5=9b26257224a4b5a32f2490dcc3b18f06}, doi = {10.1007/s10800-009-9874-6}, year = {2009}, date = {2009-01-01}, journal = {Journal of Applied Electrochemistry}, volume = {39}, number = {10}, pages = {1755-1762}, abstract = {The primary and secondary current distributions in bipolar electrochemical reactors with recessed electrodes are compared. When the electrolyte of the different reactors of the stack is connected, and thus a leakage current is possible, the secondary current distribution is more pronounced than the primary one for the cases of industrial importance. In the absence of a leakage current the usual behavior of a monopolar system is observed. © 2009 Springer Science+Business Media B.V.}, note = {cited By 3}, keywords = {}, pubstate = {published}, tppubtype = {article} } The primary and secondary current distributions in bipolar electrochemical reactors with recessed electrodes are compared. When the electrolyte of the different reactors of the stack is connected, and thus a leakage current is possible, the secondary current distribution is more pronounced than the primary one for the cases of industrial importance. In the absence of a leakage current the usual behavior of a monopolar system is observed. © 2009 Springer Science+Business Media B.V. |
K.A. Torres, J. Espinosa The influence of tangent pinch points on the performance of batch rectifications: Development of a conceptual model for ternary mixtures (Artículo de revista) Computer Aided Chemical Engineering, 27 (C), pp. 963-968, 2009, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Torres2009963, title = {The influence of tangent pinch points on the performance of batch rectifications: Development of a conceptual model for ternary mixtures}, author = { K.A. Torres and J. Espinosa}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77649298197&partnerID=40&md5=c666f4523d5daa79d086e35676623257}, doi = {10.1016/S1570-7946(09)70381-5}, year = {2009}, date = {2009-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {27}, number = {C}, pages = {963-968}, abstract = {This contribution explores the influence of tangent pinch points on the performance of batch distillations of highly non ideal ternary mixtures and its incorporation into a conceptual modeling framework under the assumption of a batch rectifier with infinite number of stages. Two algorithms are developed to determine the maximum feasible distillate composition on the line of preferred separation and its corresponding limiting reflux ratio. The first one is based on a region elimination method (REM) and relates the occurrence of a saddle-node bifurcation to the appearance of a maximum in the curve reflux vs. molar liquid fraction of any of the components of the mixture. This method takes into account the bifurcation analysis of reversible distillation profiles. The second method is analytical and consists in solving an equation system that represents two facts: the liquid compositions of the bifurcation point, together with its vapor in equilibrium and its distillate must be aligned, and a turning point occurs. 
The resolution of the system is simplified because of the fact that in batch distillation of ternary mixtures operating at reflux below or equal to the limiting one, the bifurcation point coincides with the instantaneous still composition in the reboiler. Then, the dependence of feasible distillate mole fractions on reflux ratios above the limiting one is calculated by solving a non linear equation system, which incorporates the tangency condition and provides the unknown compositions of tangent pinches. A numerical method, based on the improved memory method, is implemented here to solve this non linear system. The conceptual model replaces the one based on the assumption of the invariance of a controlling saddle pinch, only valid for ideal systems and non ideal mixtures without tangent pinch points, and emphasizes the variation of the composition of the tangential pinch points for reflux above the limiting one. Results obtained from the conceptual model for instantaneous column performance are in excellent agreement with rigorous simulation in Hysys and highlight the effectiveness of the proposed algorithm. Two highly non ideal ternary mixtures are studied along the contribution: the systems methanol/2-propanol/water and acetone/chloroform/ benzene. © 2009 Elsevier B.V. All rights reserved.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {article} } This contribution explores the influence of tangent pinch points on the performance of batch distillations of highly non ideal ternary mixtures and its incorporation into a conceptual modeling framework under the assumption of a batch rectifier with infinite number of stages. Two algorithms are developed to determine the maximum feasible distillate composition on the line of preferred separation and its corresponding limiting reflux ratio. 
The first one is based on a region elimination method (REM) and relates the occurrence of a saddle-node bifurcation to the appearance of a maximum in the curve reflux vs. molar liquid fraction of any of the components of the mixture. This method takes into account the bifurcation analysis of reversible distillation profiles. The second method is analytical and consists in solving an equation system that represents two facts: the liquid compositions of the bifurcation point, together with its vapor in equilibrium and its distillate must be aligned, and a turning point occurs. The resolution of the system is simplified because of the fact that in batch distillation of ternary mixtures operating at reflux below or equal to the limiting one, the bifurcation point coincides with the instantaneous still composition in the reboiler. Then, the dependence of feasible distillate mole fractions on reflux ratios above the limiting one is calculated by solving a non linear equation system, which incorporates the tangency condition and provides the unknown compositions of tangent pinches. A numerical method, based on the improved memory method, is implemented here to solve this non linear system. The conceptual model replaces the one based on the assumption of the invariance of a controlling saddle pinch, only valid for ideal systems and non ideal mixtures without tangent pinch points, and emphasizes the variation of the composition of the tangential pinch points for reflux above the limiting one. Results obtained from the conceptual model for instantaneous column performance are in excellent agreement with rigorous simulation in Hysys and highlight the effectiveness of the proposed algorithm. Two highly non ideal ternary mixtures are studied along the contribution: the systems methanol/2-propanol/water and acetone/chloroform/ benzene. © 2009 Elsevier B.V. All rights reserved. |
K.A. Torres, J. Espinosa Incorporating tangent pinch points into the conceptual modeling of batch distillations: Ternary mixtures (Artículo de revista) Industrial and Engineering Chemistry Research, 48 (2), pp. 857-869, 2009, (cited By 2). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Torres2009857, title = {Incorporating tangent pinch points into the conceptual modeling of batch distillations: Ternary mixtures}, author = { K.A. Torres and J. Espinosa}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-61649099342&partnerID=40&md5=747947674745fb38e1bfdb204087caf1}, doi = {10.1021/ie801169x}, year = {2009}, date = {2009-01-01}, journal = {Industrial and Engineering Chemistry Research}, volume = {48}, number = {2}, pages = {857-869}, abstract = {This contribution explores the influence of tangent pinch points on the performance of batch distillations of highly nonideal ternary mixtures and its incorporation into a conceptual modeling framework under the assumption of a rectifier with an infinite number of stages. The maximum feasible distillate composition on the line of preferred separation and its corresponding limiting reflux ratio are first determined with the aid of bifurcation analysis of reversible distillation profiles. Then, the dependence of feasible distillate mole fractions on reflux ratios above the limiting one is calculated by solving a nonlinear equation system, which incorporates the tangency condition. Results obtained from the conceptual model for instantaneous column performance are in excellent agreement with those calculated from rigorous simulation. Two highly nonideal ternary mixtures are studied. 
© 2009 American Chemical Society.}, note = {cited By 2}, keywords = {}, pubstate = {published}, tppubtype = {article} } This contribution explores the influence of tangent pinch points on the performance of batch distillations of highly nonideal ternary mixtures and its incorporation into a conceptual modeling framework under the assumption of a rectifier with an infinite number of stages. The maximum feasible distillate composition on the line of preferred separation and its corresponding limiting reflux ratio are first determined with the aid of bifurcation analysis of reversible distillation profiles. Then, the dependence of feasible distillate mole fractions on reflux ratios above the limiting one is calculated by solving a nonlinear equation system, which incorporates the tangency condition. Results obtained from the conceptual model for instantaneous column performance are in excellent agreement with those calculated from rigorous simulation. Two highly nonideal ternary mixtures are studied. © 2009 American Chemical Society. |
D.M. Giménez, G.P. Henning, H.P. Leone A hierarchical product-property model to support product classification and manage structural and planning data (Artículo de revista) Lecture Notes in Business Information Processing, 24 LNBIP , pp. 639-650, 2009, (cited By 2). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Giménez2009639, title = {A hierarchical product-property model to support product classification and manage structural and planning data}, author = { D.M. Giménez and G.P. Henning and H.P. Leone}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-65949101039&partnerID=40&md5=53ad20cd2c183882408a3caad7a094ef}, doi = {10.1007/978-3-642-01347-8_53}, year = {2009}, date = {2009-01-01}, journal = {Lecture Notes in Business Information Processing}, volume = {24 LNBIP}, pages = {639-650}, abstract = {Mass customization is one of the main challenges that managers face since it results in a proliferation of product data within the various organizational areas of an enterprise and across different enterprises. Effective solutions to this problem have resorted to generic bills of materials and to the grouping of product variants into product families, thus improving data management and sharing. However, issues like product family identification and formation, as well as data aggregation have not been dealt with by this type of approach. This contribution addresses these challenges and proposes a hierarchical data model based on the concepts of variant, variant set and family. It allows managing huge amounts of structural and non-structural information in a systematic way, with minimum replication. Besides, it proposes an unambiguous criterion, based on the properties of variants, for identifying families and variant sets. Finally, the approach can explicitly handle aggregated data which is intrinsic to generic concepts like families and variant sets. A case study is analyzed to illustrate the representation capabilities of this approach. 
© 2009 Springer Berlin Heidelberg.}, note = {cited By 2}, keywords = {}, pubstate = {published}, tppubtype = {article} } Mass customization is one of the main challenges that managers face since it results in a proliferation of product data within the various organizational areas of an enterprise and across different enterprises. Effective solutions to this problem have resorted to generic bills of materials and to the grouping of product variants into product families, thus improving data management and sharing. However, issues like product family identification and formation, as well as data aggregation have not been dealt with by this type of approach. This contribution addresses these challenges and proposes a hierarchical data model based on the concepts of variant, variant set and family. It allows managing huge amounts of structural and non-structural information in a systematic way, with minimum replication. Besides, it proposes an unambiguous criterion, based on the properties of variants, for identifying families and variant sets. Finally, the approach can explicitly handle aggregated data which is intrinsic to generic concepts like families and variant sets. A case study is analyzed to illustrate the representation capabilities of this approach. © 2009 Springer Berlin Heidelberg. |